Squashed 'third_party/blasfeo/' content from commit 2a828ca
Change-Id: If1c3caa4799b2d4eb287ef83fa17043587ef07a3
git-subtree-dir: third_party/blasfeo
git-subtree-split: 2a828ca5442108c4c58e4b42b061a0469043f6ea
diff --git a/auxiliary/c99/Makefile b/auxiliary/c99/Makefile
new file mode 100644
index 0000000..6e9ea7b
--- /dev/null
+++ b/auxiliary/c99/Makefile
@@ -0,0 +1,77 @@
+###################################################################################################
+# #
+# This file is part of BLASFEO. #
+# #
+# BLASFEO -- BLAS For Embedded Optimization. #
+# Copyright (C) 2016-2017 by Gianluca Frison. #
+# Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl. #
+# All rights reserved. #
+# #
+# HPMPC is free software; you can redistribute it and/or #
+# modify it under the terms of the GNU Lesser General Public #
+# License as published by the Free Software Foundation; either #
+# version 2.1 of the License, or (at your option) any later version. #
+# #
+# HPMPC is distributed in the hope that it will be useful, #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
+# See the GNU Lesser General Public License for more details. #
+# #
+# You should have received a copy of the GNU Lesser General Public #
+# License along with HPMPC; if not, write to the Free Software #
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA #
+# #
+# Author: Gianluca Frison, giaf (at) dtu.dk #
+# gianluca.frison (at) imtek.uni-freiburg.de #
+# #
+###################################################################################################
+
+include ../../Makefile.rule
+
+OBJS =
+
+ifeq ($(LA), HIGH_PERFORMANCE)
+
+ifeq ($(TARGET), X64_INTEL_HASWELL)
+OBJS +=
+OBJS += kernel_sgetr_lib4.o
+endif
+
+ifeq ($(TARGET), X64_INTEL_SANDY_BRIDGE)
+OBJS +=
+OBJS += kernel_sgetr_lib4.o
+endif
+
+ifeq ($(TARGET), X64_INTEL_CORE)
+OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
+OBJS += kernel_sgetr_lib4.o
+endif
+
+ifeq ($(TARGET), X64_AMD_BULLDOZER)
+OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
+OBJS += kernel_sgetr_lib4.o
+endif
+
+ifeq ($(TARGET), ARMV8A_ARM_CORTEX_A57)
+OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
+OBJS += kernel_sgetr_lib4.o
+endif
+
+ifeq ($(TARGET), ARMV7A_ARM_CORTEX_A15)
+OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
+OBJS += kernel_sgetr_lib4.o
+endif
+
+ifeq ($(TARGET), GENERIC)
+OBJS += kernel_dgecp_lib4.o kernel_dgetr_lib4.o
+OBJS += kernel_sgetr_lib4.o
+endif
+
+else # LA_REFERENCE | LA_BLAS
+
+endif # LA choice
+
+obj: $(OBJS)
+
+clean:
+ rm -f *.o
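+
+# Editorial usage sketch (hedged, not part of the upstream build notes): LA and
+# TARGET are normally set in ../../Makefile.rule; with GNU make they can also be
+# overridden on the command line when building from this directory, e.g.
+#
+#   make obj LA=HIGH_PERFORMANCE TARGET=GENERIC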
diff --git a/auxiliary/c99/kernel_dgecp_lib4.c b/auxiliary/c99/kernel_dgecp_lib4.c
new file mode 100644
index 0000000..e883072
--- /dev/null
+++ b/auxiliary/c99/kernel_dgecp_lib4.c
@@ -0,0 +1,1261 @@
+/**************************************************************************************************
+* *
+* This file is part of BLASFEO. *
+* *
+* BLASFEO -- BLAS For Embedded Optimization. *
+* Copyright (C) 2016-2017 by Gianluca Frison. *
+* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl. *
+* All rights reserved. *
+* *
+* HPMPC is free software; you can redistribute it and/or *
+* modify it under the terms of the GNU Lesser General Public *
+* License as published by the Free Software Foundation; either *
+* version 2.1 of the License, or (at your option) any later version. *
+* *
+* HPMPC is distributed in the hope that it will be useful, *
+* but WITHOUT ANY WARRANTY; without even the implied warranty of *
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU Lesser General Public License for more details. *
+* *
+* You should have received a copy of the GNU Lesser General Public *
+* License along with HPMPC; if not, write to the Free Software *
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA *
+* *
+* Author: Gianluca Frison, giaf (at) dtu.dk *
+* gianluca.frison (at) imtek.uni-freiburg.de *
+* *
+**************************************************************************************************/
+
+
+
+// both A and B are aligned to 256-bit boundaries
+void kernel_dgecp_4_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 4-wide + end 3x3 triangle
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+ B[0+bs*0] = alpha*A[0+bs*0];
+ B[1+bs*0] = alpha*A[1+bs*0];
+ B[2+bs*0] = alpha*A[2+bs*0];
+ B[3+bs*0] = alpha*A[3+bs*0];
+
+ B[0+bs*1] = alpha*A[0+bs*1];
+ B[1+bs*1] = alpha*A[1+bs*1];
+ B[2+bs*1] = alpha*A[2+bs*1];
+ B[3+bs*1] = alpha*A[3+bs*1];
+
+ B[0+bs*2] = alpha*A[0+bs*2];
+ B[1+bs*2] = alpha*A[1+bs*2];
+ B[2+bs*2] = alpha*A[2+bs*2];
+ B[3+bs*2] = alpha*A[3+bs*2];
+
+ B[0+bs*3] = alpha*A[0+bs*3];
+ B[1+bs*3] = alpha*A[1+bs*3];
+ B[2+bs*3] = alpha*A[2+bs*3];
+ B[3+bs*3] = alpha*A[3+bs*3];
+
+ A += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A[0+bs*0];
+ B[1+bs*0] = alpha*A[1+bs*0];
+ B[2+bs*0] = alpha*A[2+bs*0];
+ B[3+bs*0] = alpha*A[3+bs*0];
+
+ A += 4;
+ B += 4;
+
+ }
+
+ if(tri==1)
+ {
+ // 3x3 triangle
+
+ B[1+bs*0] = alpha*A[1+bs*0];
+ B[2+bs*0] = alpha*A[2+bs*0];
+ B[3+bs*0] = alpha*A[3+bs*0];
+
+ B[2+bs*1] = alpha*A[2+bs*1];
+ B[3+bs*1] = alpha*A[3+bs*1];
+
+ B[3+bs*2] = alpha*A[3+bs*2];
+
+ }
+
+ }
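+
+
+
+// Editorial note (hedged, not from the upstream sources): these kernels work
+// on the panel-major "lib4" layout implied by bs=4, where a matrix is stored
+// as 4-row panels and element (i,j) of a panel sits at p[i+bs*j]. Assuming A
+// and B each point to the start of a 4-row panel holding kmax columns, a
+// plain panel copy with this kernel could look like:
+//
+//   kernel_dgecp_4_0_lib4(0, kmax, 1.0, A, B);   // B = 1.0*A, no triangle
+//
+// The _4_1/_4_2/_4_3 variants below read the source rows with an offset of
+// 1/2/3 inside the panel, which is why they take the panel stride sda and
+// form a second panel pointer A1 = A0 + bs*sda.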
+
+
+
+// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
+void kernel_dgecp_4_1_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 4-wide + end 3x3 triangle
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] = alpha*A0[1+bs*0];
+ B[1+bs*0] = alpha*A0[2+bs*0];
+ B[2+bs*0] = alpha*A0[3+bs*0];
+ B[3+bs*0] = alpha*A1[0+bs*0];
+
+ B[0+bs*1] = alpha*A0[1+bs*1];
+ B[1+bs*1] = alpha*A0[2+bs*1];
+ B[2+bs*1] = alpha*A0[3+bs*1];
+ B[3+bs*1] = alpha*A1[0+bs*1];
+
+ B[0+bs*2] = alpha*A0[1+bs*2];
+ B[1+bs*2] = alpha*A0[2+bs*2];
+ B[2+bs*2] = alpha*A0[3+bs*2];
+ B[3+bs*2] = alpha*A1[0+bs*2];
+
+ B[0+bs*3] = alpha*A0[1+bs*3];
+ B[1+bs*3] = alpha*A0[2+bs*3];
+ B[2+bs*3] = alpha*A0[3+bs*3];
+ B[3+bs*3] = alpha*A1[0+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A0[1+bs*0];
+ B[1+bs*0] = alpha*A0[2+bs*0];
+ B[2+bs*0] = alpha*A0[3+bs*0];
+ B[3+bs*0] = alpha*A1[0+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ if(tri==1)
+ {
+ // 3x3 triangle
+
+ B[1+0*bs] = alpha*A0[2+0*bs];
+ B[2+0*bs] = alpha*A0[3+0*bs];
+ B[3+0*bs] = alpha*A1[0+0*bs];
+
+ B[2+1*bs] = alpha*A0[3+1*bs];
+ B[3+1*bs] = alpha*A1[0+1*bs];
+
+ B[3+2*bs] = alpha*A1[0+2*bs];
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
+void kernel_dgecp_4_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 4-wide + end 3x3 triangle
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] = alpha*A0[2+bs*0];
+ B[1+bs*0] = alpha*A0[3+bs*0];
+ B[2+bs*0] = alpha*A1[0+bs*0];
+ B[3+bs*0] = alpha*A1[1+bs*0];
+
+ B[0+bs*1] = alpha*A0[2+bs*1];
+ B[1+bs*1] = alpha*A0[3+bs*1];
+ B[2+bs*1] = alpha*A1[0+bs*1];
+ B[3+bs*1] = alpha*A1[1+bs*1];
+
+ B[0+bs*2] = alpha*A0[2+bs*2];
+ B[1+bs*2] = alpha*A0[3+bs*2];
+ B[2+bs*2] = alpha*A1[0+bs*2];
+ B[3+bs*2] = alpha*A1[1+bs*2];
+
+ B[0+bs*3] = alpha*A0[2+bs*3];
+ B[1+bs*3] = alpha*A0[3+bs*3];
+ B[2+bs*3] = alpha*A1[0+bs*3];
+ B[3+bs*3] = alpha*A1[1+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A0[2+bs*0];
+ B[1+bs*0] = alpha*A0[3+bs*0];
+ B[2+bs*0] = alpha*A1[0+bs*0];
+ B[3+bs*0] = alpha*A1[1+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ if(tri==1)
+ {
+		// 3x3 triangle
+
+ B[1+bs*0] = alpha*A0[3+bs*0];
+ B[2+bs*0] = alpha*A1[0+bs*0];
+ B[3+bs*0] = alpha*A1[1+bs*0];
+
+ B[2+bs*1] = alpha*A1[0+bs*1];
+ B[3+bs*1] = alpha*A1[1+bs*1];
+
+ B[3+bs*2] = alpha*A1[1+bs*2];
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
+void kernel_dgecp_4_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 4-wide + end 3x3 triangle
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] = alpha*A0[3+bs*0];
+ B[1+bs*0] = alpha*A1[0+bs*0];
+ B[2+bs*0] = alpha*A1[1+bs*0];
+ B[3+bs*0] = alpha*A1[2+bs*0];
+
+ B[0+bs*1] = alpha*A0[3+bs*1];
+ B[1+bs*1] = alpha*A1[0+bs*1];
+ B[2+bs*1] = alpha*A1[1+bs*1];
+ B[3+bs*1] = alpha*A1[2+bs*1];
+
+ B[0+bs*2] = alpha*A0[3+bs*2];
+ B[1+bs*2] = alpha*A1[0+bs*2];
+ B[2+bs*2] = alpha*A1[1+bs*2];
+ B[3+bs*2] = alpha*A1[2+bs*2];
+
+ B[0+bs*3] = alpha*A0[3+bs*3];
+ B[1+bs*3] = alpha*A1[0+bs*3];
+ B[2+bs*3] = alpha*A1[1+bs*3];
+ B[3+bs*3] = alpha*A1[2+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A0[3+bs*0];
+ B[1+bs*0] = alpha*A1[0+bs*0];
+ B[2+bs*0] = alpha*A1[1+bs*0];
+ B[3+bs*0] = alpha*A1[2+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ if(tri==1)
+ {
+ // 3x3 triangle
+
+ B[1+bs*0] = alpha*A1[0+bs*0];
+ B[2+bs*0] = alpha*A1[1+bs*0];
+ B[3+bs*0] = alpha*A1[2+bs*0];
+
+ B[2+bs*1] = alpha*A1[1+bs*1];
+ B[3+bs*1] = alpha*A1[2+bs*1];
+
+ B[3+bs*2] = alpha*A1[2+bs*2];
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 64-bit boundaries
+void kernel_dgecp_3_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 3-wide + end 2x2 triangle
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+ B[0+bs*0] = alpha*A[0+bs*0];
+ B[1+bs*0] = alpha*A[1+bs*0];
+ B[2+bs*0] = alpha*A[2+bs*0];
+
+ B[0+bs*1] = alpha*A[0+bs*1];
+ B[1+bs*1] = alpha*A[1+bs*1];
+ B[2+bs*1] = alpha*A[2+bs*1];
+
+ B[0+bs*2] = alpha*A[0+bs*2];
+ B[1+bs*2] = alpha*A[1+bs*2];
+ B[2+bs*2] = alpha*A[2+bs*2];
+
+ B[0+bs*3] = alpha*A[0+bs*3];
+ B[1+bs*3] = alpha*A[1+bs*3];
+ B[2+bs*3] = alpha*A[2+bs*3];
+
+ A += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A[0+bs*0];
+ B[1+bs*0] = alpha*A[1+bs*0];
+ B[2+bs*0] = alpha*A[2+bs*0];
+
+ A += 4;
+ B += 4;
+
+ }
+
+ if(tri==1)
+ {
+ // 2x2 triangle
+
+ B[1+bs*0] = alpha*A[1+bs*0];
+ B[2+bs*0] = alpha*A[2+bs*0];
+
+ B[2+bs*1] = alpha*A[2+bs*1];
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
+void kernel_dgecp_3_2_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 3-wide + end 2x2 triangle
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] = alpha*A0[2+bs*0];
+ B[1+bs*0] = alpha*A0[3+bs*0];
+ B[2+bs*0] = alpha*A1[0+bs*0];
+
+ B[0+bs*1] = alpha*A0[2+bs*1];
+ B[1+bs*1] = alpha*A0[3+bs*1];
+ B[2+bs*1] = alpha*A1[0+bs*1];
+
+ B[0+bs*2] = alpha*A0[2+bs*2];
+ B[1+bs*2] = alpha*A0[3+bs*2];
+ B[2+bs*2] = alpha*A1[0+bs*2];
+
+ B[0+bs*3] = alpha*A0[2+bs*3];
+ B[1+bs*3] = alpha*A0[3+bs*3];
+ B[2+bs*3] = alpha*A1[0+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A0[2+bs*0];
+ B[1+bs*0] = alpha*A0[3+bs*0];
+ B[2+bs*0] = alpha*A1[0+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ if(tri==1)
+ {
+ // 2x2 triangle
+
+ B[1+bs*0] = alpha*A0[3+bs*0];
+ B[2+bs*0] = alpha*A1[0+bs*0];
+
+ B[2+bs*1] = alpha*A1[0+bs*1];
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
+void kernel_dgecp_3_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 3-wide + end 2x2 triangle
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] = alpha*A0[3+bs*0];
+ B[1+bs*0] = alpha*A1[0+bs*0];
+ B[2+bs*0] = alpha*A1[1+bs*0];
+
+ B[0+bs*1] = alpha*A0[3+bs*1];
+ B[1+bs*1] = alpha*A1[0+bs*1];
+ B[2+bs*1] = alpha*A1[1+bs*1];
+
+ B[0+bs*2] = alpha*A0[3+bs*2];
+ B[1+bs*2] = alpha*A1[0+bs*2];
+ B[2+bs*2] = alpha*A1[1+bs*2];
+
+ B[0+bs*3] = alpha*A0[3+bs*3];
+ B[1+bs*3] = alpha*A1[0+bs*3];
+ B[2+bs*3] = alpha*A1[1+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A0[3+bs*0];
+ B[1+bs*0] = alpha*A1[0+bs*0];
+ B[2+bs*0] = alpha*A1[1+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ if(tri==1)
+ {
+ // 2x2 triangle
+
+ B[1+bs*0] = alpha*A1[0+bs*0];
+ B[2+bs*0] = alpha*A1[1+bs*0];
+
+ B[2+bs*1] = alpha*A1[1+bs*1];
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 64-bit boundaries
+void kernel_dgecp_2_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 2-wide + end 1x1 triangle
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+ B[0+bs*0] = alpha*A[0+bs*0];
+ B[1+bs*0] = alpha*A[1+bs*0];
+
+ B[0+bs*1] = alpha*A[0+bs*1];
+ B[1+bs*1] = alpha*A[1+bs*1];
+
+ B[0+bs*2] = alpha*A[0+bs*2];
+ B[1+bs*2] = alpha*A[1+bs*2];
+
+ B[0+bs*3] = alpha*A[0+bs*3];
+ B[1+bs*3] = alpha*A[1+bs*3];
+
+ A += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A[0+bs*0];
+ B[1+bs*0] = alpha*A[1+bs*0];
+
+ A += 4;
+ B += 4;
+
+ }
+
+ if(tri==1)
+ {
+ // 1x1 triangle
+
+ B[1+bs*0] = alpha*A[1+bs*0];
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 128-bit boundaries, 3 elements of A must be skipped
+void kernel_dgecp_2_3_lib4(int tri, int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 2-wide + end 1x1 triangle
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] = alpha*A0[3+bs*0];
+ B[1+bs*0] = alpha*A1[0+bs*0];
+
+ B[0+bs*1] = alpha*A0[3+bs*1];
+ B[1+bs*1] = alpha*A1[0+bs*1];
+
+ B[0+bs*2] = alpha*A0[3+bs*2];
+ B[1+bs*2] = alpha*A1[0+bs*2];
+
+ B[0+bs*3] = alpha*A0[3+bs*3];
+ B[1+bs*3] = alpha*A1[0+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A0[3+bs*0];
+ B[1+bs*0] = alpha*A1[0+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ if(tri==1)
+ {
+ // 1x1 triangle
+
+ B[1+bs*0] = alpha*A1[0+bs*0];
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 64-bit boundaries
+void kernel_dgecp_1_0_lib4(int tri, int kmax, double alpha, double *A, double *B)
+ {
+
+ if(tri==1)
+ {
+		// A and B are lower triangular
+ // kmax+1 1-wide
+
+ kmax += 1;
+ }
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+ B[0+bs*0] = alpha*A[0+bs*0];
+
+ B[0+bs*1] = alpha*A[0+bs*1];
+
+ B[0+bs*2] = alpha*A[0+bs*2];
+
+ B[0+bs*3] = alpha*A[0+bs*3];
+
+ A += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] = alpha*A[0+bs*0];
+
+ A += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+
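+// Editorial note (hedged): the kernel_dgead_* kernels below accumulate into
+// the destination, B[.] += alpha*A[.], column by column within a 4-row panel,
+// whereas the kernel_dgecp_* kernels above overwrite, B[.] = alpha*A[.].
+// A minimal sketch, assuming A and B point to 4-row panels with kmax columns:
+//
+//   kernel_dgead_4_0_lib4(kmax, 0.5, A, B);   // B += 0.5*A
+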
+// both A and B are aligned to 256-bit boundaries
+void kernel_dgead_4_0_lib4(int kmax, double alpha, double *A, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+ B[0+bs*0] += alpha * A[0+bs*0];
+ B[1+bs*0] += alpha * A[1+bs*0];
+ B[2+bs*0] += alpha * A[2+bs*0];
+ B[3+bs*0] += alpha * A[3+bs*0];
+
+ B[0+bs*1] += alpha * A[0+bs*1];
+ B[1+bs*1] += alpha * A[1+bs*1];
+ B[2+bs*1] += alpha * A[2+bs*1];
+ B[3+bs*1] += alpha * A[3+bs*1];
+
+ B[0+bs*2] += alpha * A[0+bs*2];
+ B[1+bs*2] += alpha * A[1+bs*2];
+ B[2+bs*2] += alpha * A[2+bs*2];
+ B[3+bs*2] += alpha * A[3+bs*2];
+
+ B[0+bs*3] += alpha * A[0+bs*3];
+ B[1+bs*3] += alpha * A[1+bs*3];
+ B[2+bs*3] += alpha * A[2+bs*3];
+ B[3+bs*3] += alpha * A[3+bs*3];
+
+ A += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A[0+bs*0];
+ B[1+bs*0] += alpha * A[1+bs*0];
+ B[2+bs*0] += alpha * A[2+bs*0];
+ B[3+bs*0] += alpha * A[3+bs*0];
+
+ A += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 256-bit boundaries, 1 element of A must be skipped
+void kernel_dgead_4_1_lib4(int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] += alpha * A0[1+bs*0];
+ B[1+bs*0] += alpha * A0[2+bs*0];
+ B[2+bs*0] += alpha * A0[3+bs*0];
+ B[3+bs*0] += alpha * A1[0+bs*0];
+
+ B[0+bs*1] += alpha * A0[1+bs*1];
+ B[1+bs*1] += alpha * A0[2+bs*1];
+ B[2+bs*1] += alpha * A0[3+bs*1];
+ B[3+bs*1] += alpha * A1[0+bs*1];
+
+ B[0+bs*2] += alpha * A0[1+bs*2];
+ B[1+bs*2] += alpha * A0[2+bs*2];
+ B[2+bs*2] += alpha * A0[3+bs*2];
+ B[3+bs*2] += alpha * A1[0+bs*2];
+
+ B[0+bs*3] += alpha * A0[1+bs*3];
+ B[1+bs*3] += alpha * A0[2+bs*3];
+ B[2+bs*3] += alpha * A0[3+bs*3];
+ B[3+bs*3] += alpha * A1[0+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A0[1+bs*0];
+ B[1+bs*0] += alpha * A0[2+bs*0];
+ B[2+bs*0] += alpha * A0[3+bs*0];
+ B[3+bs*0] += alpha * A1[0+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
+void kernel_dgead_4_2_lib4(int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] += alpha * A0[2+bs*0];
+ B[1+bs*0] += alpha * A0[3+bs*0];
+ B[2+bs*0] += alpha * A1[0+bs*0];
+ B[3+bs*0] += alpha * A1[1+bs*0];
+
+ B[0+bs*1] += alpha * A0[2+bs*1];
+ B[1+bs*1] += alpha * A0[3+bs*1];
+ B[2+bs*1] += alpha * A1[0+bs*1];
+ B[3+bs*1] += alpha * A1[1+bs*1];
+
+ B[0+bs*2] += alpha * A0[2+bs*2];
+ B[1+bs*2] += alpha * A0[3+bs*2];
+ B[2+bs*2] += alpha * A1[0+bs*2];
+ B[3+bs*2] += alpha * A1[1+bs*2];
+
+ B[0+bs*3] += alpha * A0[2+bs*3];
+ B[1+bs*3] += alpha * A0[3+bs*3];
+ B[2+bs*3] += alpha * A1[0+bs*3];
+ B[3+bs*3] += alpha * A1[1+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A0[2+bs*0];
+ B[1+bs*0] += alpha * A0[3+bs*0];
+ B[2+bs*0] += alpha * A1[0+bs*0];
+ B[3+bs*0] += alpha * A1[1+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
+void kernel_dgead_4_3_lib4(int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] += alpha * A0[3+bs*0];
+ B[1+bs*0] += alpha * A1[0+bs*0];
+ B[2+bs*0] += alpha * A1[1+bs*0];
+ B[3+bs*0] += alpha * A1[2+bs*0];
+
+ B[0+bs*1] += alpha * A0[3+bs*1];
+ B[1+bs*1] += alpha * A1[0+bs*1];
+ B[2+bs*1] += alpha * A1[1+bs*1];
+ B[3+bs*1] += alpha * A1[2+bs*1];
+
+ B[0+bs*2] += alpha * A0[3+bs*2];
+ B[1+bs*2] += alpha * A1[0+bs*2];
+ B[2+bs*2] += alpha * A1[1+bs*2];
+ B[3+bs*2] += alpha * A1[2+bs*2];
+
+ B[0+bs*3] += alpha * A0[3+bs*3];
+ B[1+bs*3] += alpha * A1[0+bs*3];
+ B[2+bs*3] += alpha * A1[1+bs*3];
+ B[3+bs*3] += alpha * A1[2+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A0[3+bs*0];
+ B[1+bs*0] += alpha * A1[0+bs*0];
+ B[2+bs*0] += alpha * A1[1+bs*0];
+ B[3+bs*0] += alpha * A1[2+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 64-bit boundaries
+void kernel_dgead_3_0_lib4(int kmax, double alpha, double *A, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+ B[0+bs*0] += alpha * A[0+bs*0];
+ B[1+bs*0] += alpha * A[1+bs*0];
+ B[2+bs*0] += alpha * A[2+bs*0];
+
+ B[0+bs*1] += alpha * A[0+bs*1];
+ B[1+bs*1] += alpha * A[1+bs*1];
+ B[2+bs*1] += alpha * A[2+bs*1];
+
+ B[0+bs*2] += alpha * A[0+bs*2];
+ B[1+bs*2] += alpha * A[1+bs*2];
+ B[2+bs*2] += alpha * A[2+bs*2];
+
+ B[0+bs*3] += alpha * A[0+bs*3];
+ B[1+bs*3] += alpha * A[1+bs*3];
+ B[2+bs*3] += alpha * A[2+bs*3];
+
+ A += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A[0+bs*0];
+ B[1+bs*0] += alpha * A[1+bs*0];
+ B[2+bs*0] += alpha * A[2+bs*0];
+
+ A += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 256-bit boundaries, 2 elements of A must be skipped
+void kernel_dgead_3_2_lib4(int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] += alpha * A0[2+bs*0];
+ B[1+bs*0] += alpha * A0[3+bs*0];
+ B[2+bs*0] += alpha * A1[0+bs*0];
+
+ B[0+bs*1] += alpha * A0[2+bs*1];
+ B[1+bs*1] += alpha * A0[3+bs*1];
+ B[2+bs*1] += alpha * A1[0+bs*1];
+
+ B[0+bs*2] += alpha * A0[2+bs*2];
+ B[1+bs*2] += alpha * A0[3+bs*2];
+ B[2+bs*2] += alpha * A1[0+bs*2];
+
+ B[0+bs*3] += alpha * A0[2+bs*3];
+ B[1+bs*3] += alpha * A0[3+bs*3];
+ B[2+bs*3] += alpha * A1[0+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A0[2+bs*0];
+ B[1+bs*0] += alpha * A0[3+bs*0];
+ B[2+bs*0] += alpha * A1[0+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 256-bit boundaries, 3 elements of A must be skipped
+void kernel_dgead_3_3_lib4(int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] += alpha * A0[3+bs*0];
+ B[1+bs*0] += alpha * A1[0+bs*0];
+ B[2+bs*0] += alpha * A1[1+bs*0];
+
+ B[0+bs*1] += alpha * A0[3+bs*1];
+ B[1+bs*1] += alpha * A1[0+bs*1];
+ B[2+bs*1] += alpha * A1[1+bs*1];
+
+ B[0+bs*2] += alpha * A0[3+bs*2];
+ B[1+bs*2] += alpha * A1[0+bs*2];
+ B[2+bs*2] += alpha * A1[1+bs*2];
+
+ B[0+bs*3] += alpha * A0[3+bs*3];
+ B[1+bs*3] += alpha * A1[0+bs*3];
+ B[2+bs*3] += alpha * A1[1+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A0[3+bs*0];
+ B[1+bs*0] += alpha * A1[0+bs*0];
+ B[2+bs*0] += alpha * A1[1+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 64-bit boundaries
+void kernel_dgead_2_0_lib4(int kmax, double alpha, double *A, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+ B[0+bs*0] += alpha * A[0+bs*0];
+ B[1+bs*0] += alpha * A[1+bs*0];
+
+ B[0+bs*1] += alpha * A[0+bs*1];
+ B[1+bs*1] += alpha * A[1+bs*1];
+
+ B[0+bs*2] += alpha * A[0+bs*2];
+ B[1+bs*2] += alpha * A[1+bs*2];
+
+ B[0+bs*3] += alpha * A[0+bs*3];
+ B[1+bs*3] += alpha * A[1+bs*3];
+
+ A += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A[0+bs*0];
+ B[1+bs*0] += alpha * A[1+bs*0];
+
+ A += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 128-bit boundaries, 3 elements of A must be skipped
+void kernel_dgead_2_3_lib4(int kmax, double alpha, double *A0, int sda, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ double *A1 = A0 + bs*sda;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+
+ B[0+bs*0] += alpha * A0[3+bs*0];
+ B[1+bs*0] += alpha * A1[0+bs*0];
+
+ B[0+bs*1] += alpha * A0[3+bs*1];
+ B[1+bs*1] += alpha * A1[0+bs*1];
+
+ B[0+bs*2] += alpha * A0[3+bs*2];
+ B[1+bs*2] += alpha * A1[0+bs*2];
+
+ B[0+bs*3] += alpha * A0[3+bs*3];
+ B[1+bs*3] += alpha * A1[0+bs*3];
+
+ A0 += 16;
+ A1 += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A0[3+bs*0];
+ B[1+bs*0] += alpha * A1[0+bs*0];
+
+ A0 += 4;
+ A1 += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+// both A and B are aligned to 64-bit boundaries
+void kernel_dgead_1_0_lib4(int kmax, double alpha, double *A, double *B)
+ {
+
+ if(kmax<=0)
+ return;
+
+ const int bs = 4;
+
+ int k;
+
+ for(k=0; k<kmax-3; k+=4)
+ {
+ B[0+bs*0] += alpha * A[0+bs*0];
+
+ B[0+bs*1] += alpha * A[0+bs*1];
+
+ B[0+bs*2] += alpha * A[0+bs*2];
+
+ B[0+bs*3] += alpha * A[0+bs*3];
+
+ A += 16;
+ B += 16;
+
+ }
+ for(; k<kmax; k++)
+ {
+
+ B[0+bs*0] += alpha * A[0+bs*0];
+
+ A += 4;
+ B += 4;
+
+ }
+
+ }
+
+
+
+
diff --git a/auxiliary/c99/kernel_dgetr_lib4.c b/auxiliary/c99/kernel_dgetr_lib4.c
new file mode 100644
index 0000000..7d62277
--- /dev/null
+++ b/auxiliary/c99/kernel_dgetr_lib4.c
@@ -0,0 +1,414 @@
+/**************************************************************************************************
+* *
+* This file is part of BLASFEO. *
+* *
+* BLASFEO -- BLAS For Embedded Optimization. *
+* Copyright (C) 2016-2017 by Gianluca Frison. *
+* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl. *
+* All rights reserved. *
+* *
+* HPMPC is free software; you can redistribute it and/or *
+* modify it under the terms of the GNU Lesser General Public *
+* License as published by the Free Software Foundation; either *
+* version 2.1 of the License, or (at your option) any later version. *
+* *
+* HPMPC is distributed in the hope that it will be useful, *
+* but WITHOUT ANY WARRANTY; without even the implied warranty of *
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU Lesser General Public License for more details. *
+* *
+* You should have received a copy of the GNU Lesser General Public *
+* License along with HPMPC; if not, write to the Free Software *
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA *
+* *
+* Author: Gianluca Frison, giaf (at) dtu.dk *
+* gianluca.frison (at) imtek.uni-freiburg.de *
+* *
+**************************************************************************************************/
+
+
+
+// transpose of general matrices, read along panels, write across panels
+void kernel_dgetr_4_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
+ {
+
+ if(tri==1)
+ {
+ // A is lower triangular, C is upper triangular
+ // kmax+1 4-wide + end 3x3 triangle
+
+ kmax += 1;
+ }
+
+ const int bs = 4;
+
+ int k;
+
+ k = 0;
+
+ if(kmax<kna)
+ goto cleanup_loop;
+
+ if(kna>0)
+ {
+ for( ; k<kna; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+ C += bs*(sdc-1);
+ }
+
+ for( ; k<kmax-3; k+=4)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+
+ C[1+bs*0] = alpha * A[0+bs*1];
+ C[1+bs*1] = alpha * A[1+bs*1];
+ C[1+bs*2] = alpha * A[2+bs*1];
+ C[1+bs*3] = alpha * A[3+bs*1];
+
+ C[2+bs*0] = alpha * A[0+bs*2];
+ C[2+bs*1] = alpha * A[1+bs*2];
+ C[2+bs*2] = alpha * A[2+bs*2];
+ C[2+bs*3] = alpha * A[3+bs*2];
+
+ C[3+bs*0] = alpha * A[0+bs*3];
+ C[3+bs*1] = alpha * A[1+bs*3];
+ C[3+bs*2] = alpha * A[2+bs*3];
+ C[3+bs*3] = alpha * A[3+bs*3];
+
+ C += bs*sdc;
+ A += bs*bs;
+ }
+
+ cleanup_loop:
+
+ for( ; k<kmax; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+
+ if(tri==1)
+ {
+ // end 3x3 triangle
+ kna = (bs-(bs-kna+kmax)%bs)%bs;
+
+ if(kna==1)
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+ C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
+ C[1+bs*(sdc+2)] = alpha * A[3+bs*1];
+ C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
+ }
+ else if(kna==2)
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+ C[1+bs*2] = alpha * A[2+bs*1];
+ C[1+bs*3] = alpha * A[3+bs*1];
+ C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
+ }
+ else
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+ C[1+bs*2] = alpha * A[2+bs*1];
+ C[1+bs*3] = alpha * A[3+bs*1];
+ C[2+bs*3] = alpha * A[3+bs*2];
+ }
+ }
+
+ }
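+
+
+
+// Editorial note (hedged, inferred from the code above): kernel_dgetr_4_lib4
+// writes the transpose of a 4-row panel of A into C, one output row per
+// source column (C[bs*i] = alpha*A[i], i=0..3). The kna argument is the
+// number of output rows left in the current panel of C; once they are
+// written, C is bumped by bs*(sdc-1) to land on the next panel. A minimal
+// call, assuming A holds a 4 x kmax panel and C has panel stride sdc:
+//
+//   kernel_dgetr_4_lib4(0, kmax, kna, 1.0, A, C, sdc);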
+
+
+
+// transpose of general matrices, read along panels, write across panels
+void kernel_dgetr_3_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
+ {
+
+ if(tri==1)
+ {
+ // A is lower triangular, C is upper triangular
+ // kmax+1 3-wide + end 2x2 triangle
+
+ kmax += 1;
+ }
+
+ const int bs = 4;
+
+ int k;
+
+ k = 0;
+
+ if(kmax<kna)
+ goto cleanup_loop;
+
+ if(kna>0)
+ {
+ for( ; k<kna; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+ C += bs*(sdc-1);
+ }
+
+ for( ; k<kmax-3; k+=4)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+
+ C[1+bs*0] = alpha * A[0+bs*1];
+ C[1+bs*1] = alpha * A[1+bs*1];
+ C[1+bs*2] = alpha * A[2+bs*1];
+
+ C[2+bs*0] = alpha * A[0+bs*2];
+ C[2+bs*1] = alpha * A[1+bs*2];
+ C[2+bs*2] = alpha * A[2+bs*2];
+
+ C[3+bs*0] = alpha * A[0+bs*3];
+ C[3+bs*1] = alpha * A[1+bs*3];
+ C[3+bs*2] = alpha * A[2+bs*3];
+
+ C += bs*sdc;
+ A += bs*bs;
+ }
+
+ cleanup_loop:
+
+ for( ; k<kmax; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+
+ if(tri==1)
+ {
+ // end 2x2 triangle
+ kna = (bs-(bs-kna+kmax)%bs)%bs;
+
+ if(kna==1)
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
+ }
+ else
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[1+bs*2] = alpha * A[2+bs*1];
+ }
+ }
+
+ }
+
+
+
+// transpose of general matrices, read along panels, write across panels
+void kernel_dgetr_2_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
+ {
+
+ if(tri==1)
+ {
+ // A is lower triangular, C is upper triangular
+ // kmax+1 2-wide + end 1x1 triangle
+
+ kmax += 1;
+ }
+
+ const int bs = 4;
+
+ int k;
+
+ k = 0;
+
+ if(kmax<kna)
+ goto cleanup_loop;
+
+ if(kna>0)
+ {
+ for( ; k<kna; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+ C += bs*(sdc-1);
+ }
+
+ for( ; k<kmax-3; k+=4)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+
+ C[1+bs*0] = alpha * A[0+bs*1];
+ C[1+bs*1] = alpha * A[1+bs*1];
+
+ C[2+bs*0] = alpha * A[0+bs*2];
+ C[2+bs*1] = alpha * A[1+bs*2];
+
+ C[3+bs*0] = alpha * A[0+bs*3];
+ C[3+bs*1] = alpha * A[1+bs*3];
+
+ C += bs*sdc;
+ A += bs*bs;
+ }
+
+ cleanup_loop:
+
+ for( ; k<kmax; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+
+ if(tri==1)
+ {
+ // end 1x1 triangle
+ C[0+bs*1] = alpha * A[1+bs*0];
+ }
+
+ }
+
+
+
+// transpose of general matrices, read along panels, write across panels
+void kernel_dgetr_1_lib4(int tri, int kmax, int kna, double alpha, double *A, double *C, int sdc)
+ {
+
+ if(tri==1)
+ {
+ // A is lower triangular, C is upper triangular
+ // kmax+1 1-wide
+
+ kmax += 1;
+ }
+
+ const int bs = 4;
+
+ int k;
+
+ k = 0;
+
+ if(kmax<kna)
+ goto cleanup_loop;
+
+ if(kna>0)
+ {
+ for( ; k<kna; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+ C += bs*(sdc-1);
+ }
+
+ for( ; k<kmax-3; k+=4)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+
+ C[1+bs*0] = alpha * A[0+bs*1];
+
+ C[2+bs*0] = alpha * A[0+bs*2];
+
+ C[3+bs*0] = alpha * A[0+bs*3];
+
+ C += bs*sdc;
+ A += bs*bs;
+ }
+
+ cleanup_loop:
+
+ for( ; k<kmax; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+
+ }
+
+
+
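+// Editorial note (hedged): the next kernel, kernel_dgetr_4_0_lib4, works the
+// other way around from the kernels above: it reads across panels and writes
+// along one panel, transposing one 4x4 block per iteration while advancing A
+// by ps*sda (one panel down) and B by ps*ps (four columns along the output
+// panel), with no alpha scaling.
+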
+// transpose of general matrices, read across panels, write along panels
+void kernel_dgetr_4_0_lib4(int kmax, double *A, int sda, double *B)
+ {
+ const int ps = 4;
+ int k;
+ for(k=0; k<kmax-3; k+=4)
+ {
+ //
+ B[0+ps*0] = A[0+ps*0];
+ B[0+ps*1] = A[1+ps*0];
+ B[0+ps*2] = A[2+ps*0];
+ B[0+ps*3] = A[3+ps*0];
+ //
+ B[1+ps*0] = A[0+ps*1];
+ B[1+ps*1] = A[1+ps*1];
+ B[1+ps*2] = A[2+ps*1];
+ B[1+ps*3] = A[3+ps*1];
+ //
+ B[2+ps*0] = A[0+ps*2];
+ B[2+ps*1] = A[1+ps*2];
+ B[2+ps*2] = A[2+ps*2];
+ B[2+ps*3] = A[3+ps*2];
+ //
+ B[3+ps*0] = A[0+ps*3];
+ B[3+ps*1] = A[1+ps*3];
+ B[3+ps*2] = A[2+ps*3];
+ B[3+ps*3] = A[3+ps*3];
+
+ A += ps*sda;
+ B += ps*ps;
+ }
+ for( ; k<kmax; k++)
+ {
+ //
+ B[0+ps*0] = A[0+ps*0];
+ B[1+ps*0] = A[0+ps*1];
+ B[2+ps*0] = A[0+ps*2];
+ B[3+ps*0] = A[0+ps*3];
+
+ A += 1;
+ B += ps;
+ }
+ return;
+ }
+
diff --git a/auxiliary/c99/kernel_sgetr_lib4.c b/auxiliary/c99/kernel_sgetr_lib4.c
new file mode 100644
index 0000000..4cf6fa2
--- /dev/null
+++ b/auxiliary/c99/kernel_sgetr_lib4.c
@@ -0,0 +1,370 @@
+/**************************************************************************************************
+* *
+* This file is part of BLASFEO. *
+* *
+* BLASFEO -- BLAS For Embedded Optimization. *
+* Copyright (C) 2016-2017 by Gianluca Frison. *
+* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl. *
+* All rights reserved. *
+* *
+* HPMPC is free software; you can redistribute it and/or *
+* modify it under the terms of the GNU Lesser General Public *
+* License as published by the Free Software Foundation; either *
+* version 2.1 of the License, or (at your option) any later version. *
+* *
+* HPMPC is distributed in the hope that it will be useful, *
+* but WITHOUT ANY WARRANTY; without even the implied warranty of *
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
+* See the GNU Lesser General Public License for more details. *
+* *
+* You should have received a copy of the GNU Lesser General Public *
+* License along with HPMPC; if not, write to the Free Software *
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA *
+* *
+* Author: Gianluca Frison, giaf (at) dtu.dk *
+* gianluca.frison (at) imtek.uni-freiburg.de *
+* *
+**************************************************************************************************/
+
+
+
+// transpose of general matrices, read along panels, write across panels
+void kernel_sgetr_4_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc)
+ {
+
+ if(tri==1)
+ {
+ // A is lower triangular, C is upper triangular
+ // kmax+1 4-wide + end 3x3 triangle
+
+ kmax += 1;
+ }
+
+ const int bs = 4;
+
+ int k;
+
+ k = 0;
+
+ if(kmax<kna)
+ goto cleanup_loop;
+
+ if(kna>0)
+ {
+ for( ; k<kna; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+ C += bs*(sdc-1);
+ }
+
+ for( ; k<kmax-3; k+=4)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+
+ C[1+bs*0] = alpha * A[0+bs*1];
+ C[1+bs*1] = alpha * A[1+bs*1];
+ C[1+bs*2] = alpha * A[2+bs*1];
+ C[1+bs*3] = alpha * A[3+bs*1];
+
+ C[2+bs*0] = alpha * A[0+bs*2];
+ C[2+bs*1] = alpha * A[1+bs*2];
+ C[2+bs*2] = alpha * A[2+bs*2];
+ C[2+bs*3] = alpha * A[3+bs*2];
+
+ C[3+bs*0] = alpha * A[0+bs*3];
+ C[3+bs*1] = alpha * A[1+bs*3];
+ C[3+bs*2] = alpha * A[2+bs*3];
+ C[3+bs*3] = alpha * A[3+bs*3];
+
+ C += bs*sdc;
+ A += bs*bs;
+ }
+
+ cleanup_loop:
+
+ for( ; k<kmax; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+
+ if(tri==1)
+ {
+ // end 3x3 triangle
+ kna = (bs-(bs-kna+kmax)%bs)%bs;
+
+ if(kna==1)
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+ C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
+ C[1+bs*(sdc+2)] = alpha * A[3+bs*1];
+ C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
+ }
+ else if(kna==2)
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+ C[1+bs*2] = alpha * A[2+bs*1];
+ C[1+bs*3] = alpha * A[3+bs*1];
+ C[2+bs*(sdc+2)] = alpha * A[3+bs*2];
+ }
+ else
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[0+bs*3] = alpha * A[3+bs*0];
+ C[1+bs*2] = alpha * A[2+bs*1];
+ C[1+bs*3] = alpha * A[3+bs*1];
+ C[2+bs*3] = alpha * A[3+bs*2];
+ }
+ }
+
+ }
+
+
+
+// transpose of general matrices, read along panels, write across panels
+void kernel_sgetr_3_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc)
+ {
+
+ if(tri==1)
+ {
+ // A is lower triangular, C is upper triangular
+ // kmax+1 3-wide + end 2x2 triangle
+
+ kmax += 1;
+ }
+
+ const int bs = 4;
+
+ int k;
+
+ k = 0;
+
+ if(kmax<kna)
+ goto cleanup_loop;
+
+ if(kna>0)
+ {
+ for( ; k<kna; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+ C += bs*(sdc-1);
+ }
+
+ for( ; k<kmax-3; k+=4)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+
+ C[1+bs*0] = alpha * A[0+bs*1];
+ C[1+bs*1] = alpha * A[1+bs*1];
+ C[1+bs*2] = alpha * A[2+bs*1];
+
+ C[2+bs*0] = alpha * A[0+bs*2];
+ C[2+bs*1] = alpha * A[1+bs*2];
+ C[2+bs*2] = alpha * A[2+bs*2];
+
+ C[3+bs*0] = alpha * A[0+bs*3];
+ C[3+bs*1] = alpha * A[1+bs*3];
+ C[3+bs*2] = alpha * A[2+bs*3];
+
+ C += bs*sdc;
+ A += bs*bs;
+ }
+
+ cleanup_loop:
+
+ for( ; k<kmax; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+
+ if(tri==1)
+ {
+ // end 2x2 triangle
+ kna = (bs-(bs-kna+kmax)%bs)%bs;
+
+ if(kna==1)
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[1+bs*(sdc+1)] = alpha * A[2+bs*1];
+ }
+ else
+ {
+ C[0+bs*1] = alpha * A[1+bs*0];
+ C[0+bs*2] = alpha * A[2+bs*0];
+ C[1+bs*2] = alpha * A[2+bs*1];
+ }
+ }
+
+ }
+
+
+
+// transpose of general matrices, read along panels, write across panels
+void kernel_sgetr_2_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc)
+ {
+
+ if(tri==1)
+ {
+ // A is lower triangular, C is upper triangular
+ // kmax+1 2-wide + end 1x1 triangle
+
+ kmax += 1;
+ }
+
+ const int bs = 4;
+
+ int k;
+
+ k = 0;
+
+ if(kmax<kna)
+ goto cleanup_loop;
+
+ if(kna>0)
+ {
+ for( ; k<kna; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+ C += bs*(sdc-1);
+ }
+
+ for( ; k<kmax-3; k+=4)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+
+ C[1+bs*0] = alpha * A[0+bs*1];
+ C[1+bs*1] = alpha * A[1+bs*1];
+
+ C[2+bs*0] = alpha * A[0+bs*2];
+ C[2+bs*1] = alpha * A[1+bs*2];
+
+ C[3+bs*0] = alpha * A[0+bs*3];
+ C[3+bs*1] = alpha * A[1+bs*3];
+
+ C += bs*sdc;
+ A += bs*bs;
+ }
+
+ cleanup_loop:
+
+ for( ; k<kmax; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+ C[0+bs*1] = alpha * A[1+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+
+ if(tri==1)
+ {
+ // end 1x1 triangle
+ C[0+bs*1] = alpha * A[1+bs*0];
+ }
+
+ }
+
+
+
+// transpose of general matrices, read along panels, write across panels
+void kernel_sgetr_1_lib4(int tri, int kmax, int kna, float alpha, float *A, float *C, int sdc)
+ {
+
+ if(tri==1)
+ {
+ // A is lower triangular, C is upper triangular
+ // kmax+1 1-wide
+
+ kmax += 1;
+ }
+
+ const int bs = 4;
+
+ int k;
+
+ k = 0;
+
+ if(kmax<kna)
+ goto cleanup_loop;
+
+ if(kna>0)
+ {
+ for( ; k<kna; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+ C += bs*(sdc-1);
+ }
+
+ for( ; k<kmax-3; k+=4)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+
+ C[1+bs*0] = alpha * A[0+bs*1];
+
+ C[2+bs*0] = alpha * A[0+bs*2];
+
+ C[3+bs*0] = alpha * A[0+bs*3];
+
+ C += bs*sdc;
+ A += bs*bs;
+ }
+
+ cleanup_loop:
+
+ for( ; k<kmax; k++)
+ {
+ C[0+bs*0] = alpha * A[0+bs*0];
+
+ C += 1;
+ A += bs;
+ }
+
+ }
+
+
+
+