/**************************************************************************************************
*
* This file is part of BLASFEO.
*
* BLASFEO -- BLAS For Embedded Optimization.
* Copyright (C) 2016-2017 by Gianluca Frison.
* Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl.
* All rights reserved.
*
* BLASFEO is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* BLASFEO is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with BLASFEO; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Author: Gianluca Frison, giaf (at) dtu.dk
*         gianluca.frison (at) imtek.uni-freiburg.de
*
**************************************************************************************************/

#if defined(OS_LINUX) || defined(OS_MAC)

//#define STACKSIZE 96
#define STACKSIZE 64
#define ARG1 %rdi
#define ARG2 %rsi
#define ARG3 %rdx
#define ARG4 %rcx
#define ARG5 %r8
#define ARG6 %r9
#define ARG7 STACKSIZE + 8(%rsp)
#define ARG8 STACKSIZE + 16(%rsp)
#define ARG9 STACKSIZE + 24(%rsp)
#define ARG10 STACKSIZE + 32(%rsp)
#define ARG11 STACKSIZE + 40(%rsp)
#define ARG12 STACKSIZE + 48(%rsp)
#define ARG13 STACKSIZE + 56(%rsp)
#define ARG14 STACKSIZE + 64(%rsp)
#define ARG15 STACKSIZE + 72(%rsp)
#define ARG16 STACKSIZE + 80(%rsp)
#define ARG17 STACKSIZE + 88(%rsp)
#define ARG18 STACKSIZE + 96(%rsp)
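// System V AMD64 passes the first 6 integer/pointer arguments in registers; on
// entry %rsp points at the return address, so after PROLOGUE reserves STACKSIZE
// bytes the 7th argument sits at STACKSIZE + 8(%rsp), the 8th at
// STACKSIZE + 16(%rsp), and so on.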
#define PROLOGUE \
    subq $STACKSIZE, %rsp; \
    movq %rbx, (%rsp); \
    movq %rbp, 8(%rsp); \
    movq %r12, 16(%rsp); \
    movq %r13, 24(%rsp); \
    movq %r14, 32(%rsp); \
    movq %r15, 40(%rsp); \
    vzeroupper;
#define EPILOGUE \
    vzeroupper; \
    movq (%rsp), %rbx; \
    movq 8(%rsp), %rbp; \
    movq 16(%rsp), %r12; \
    movq 24(%rsp), %r13; \
    movq 32(%rsp), %r14; \
    movq 40(%rsp), %r15; \
    addq $STACKSIZE, %rsp;

#elif defined(OS_WINDOWS)

#define STACKSIZE 256
#define ARG1 %rcx
#define ARG2 %rdx
#define ARG3 %r8
#define ARG4 %r9
#define ARG5 STACKSIZE + 40(%rsp)
#define ARG6 STACKSIZE + 48(%rsp)
#define ARG7 STACKSIZE + 56(%rsp)
#define ARG8 STACKSIZE + 64(%rsp)
#define ARG9 STACKSIZE + 72(%rsp)
#define ARG10 STACKSIZE + 80(%rsp)
#define ARG11 STACKSIZE + 88(%rsp)
#define ARG12 STACKSIZE + 96(%rsp)
#define ARG13 STACKSIZE + 104(%rsp)
#define ARG14 STACKSIZE + 112(%rsp)
#define ARG15 STACKSIZE + 120(%rsp)
#define ARG16 STACKSIZE + 128(%rsp)
#define ARG17 STACKSIZE + 136(%rsp)
#define ARG18 STACKSIZE + 144(%rsp)
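// Microsoft x64 passes the first 4 integer/pointer arguments in registers and
// the caller reserves a 32-byte shadow space, so the 5th argument sits
// 8 (return address) + 32 (shadow space) = 40 bytes above the pre-PROLOGUE
// stack pointer, i.e. at STACKSIZE + 40(%rsp) after the prologue.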
#define PROLOGUE \
    subq $STACKSIZE, %rsp; \
    movq %rbx, (%rsp); \
    movq %rbp, 8(%rsp); \
    movq %r12, 16(%rsp); \
    movq %r13, 24(%rsp); \
    movq %r14, 32(%rsp); \
    movq %r15, 40(%rsp); \
    movq %rdi, 48(%rsp); \
    movq %rsi, 56(%rsp); \
    vmovups %xmm6, 64(%rsp); \
    vmovups %xmm7, 80(%rsp); \
    vmovups %xmm8, 96(%rsp); \
    vmovups %xmm9, 112(%rsp); \
    vmovups %xmm10, 128(%rsp); \
    vmovups %xmm11, 144(%rsp); \
    vmovups %xmm12, 160(%rsp); \
    vmovups %xmm13, 176(%rsp); \
    vmovups %xmm14, 192(%rsp); \
    vmovups %xmm15, 208(%rsp); \
    vzeroupper;
#define EPILOGUE \
    vzeroupper; \
    movq (%rsp), %rbx; \
    movq 8(%rsp), %rbp; \
    movq 16(%rsp), %r12; \
    movq 24(%rsp), %r13; \
    movq 32(%rsp), %r14; \
    movq 40(%rsp), %r15; \
    movq 48(%rsp), %rdi; \
    movq 56(%rsp), %rsi; \
    vmovups 64(%rsp), %xmm6; \
    vmovups 80(%rsp), %xmm7; \
    vmovups 96(%rsp), %xmm8; \
    vmovups 112(%rsp), %xmm9; \
    vmovups 128(%rsp), %xmm10; \
    vmovups 144(%rsp), %xmm11; \
    vmovups 160(%rsp), %xmm12; \
    vmovups 176(%rsp), %xmm13; \
    vmovups 192(%rsp), %xmm14; \
    vmovups 208(%rsp), %xmm15; \
    addq $STACKSIZE, %rsp;

#else

#error wrong OS

#endif



#if defined(OS_LINUX) || defined(OS_WINDOWS)
    .text
#elif defined(OS_MAC)
    .section __TEXT,__text,regular,pure_instructions
#endif
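
// From C, the kernels in this file would be declared with prototypes matching
// the signature comments below (a sketch; the authoritative declarations live
// in BLASFEO's own headers):
//
// void kernel_dger4_sub_8r_lib4(int k, double *A, int sda, double *B, double *C, int sdc);
// void kernel_dger4_sub_8r_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, int km);
// void kernel_dger4_sub_4r_lib4(int n, double *A, double *B, double *C);
// void kernel_dger4_sub_4r_vs_lib4(int n, double *A, double *B, double *C, int km);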




//                               1      2          3        4          5          6
// void kernel_dger4_sub_8r_lib4(int k, double *A, int sda, double *B, double *C, int sdc)
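//
// Functionally (in the lib4 panel-major layout: 4-row panels, one column = 4
// consecutive doubles, next panel 4*sda doubles away), this kernel computes the
// rank-4 update C[0:8,0:k] -= A[0:8,0:4] * B[0:4,0:k]. A minimal C reference
// sketch, kept in a comment so the file still assembles (the _ref name is
// hypothetical, not a BLASFEO symbol):
//
// void kernel_dger4_sub_8r_lib4_ref(int k, double *A, int sda, double *B,
//                                   double *C, int sdc)
//     {
//     int ii, jj, ll;
//     for(jj=0; jj<k; jj++)
//         for(ii=0; ii<4; ii++)
//             for(ll=0; ll<4; ll++)
//                 {
//                 C[ii+4*jj]       -= A[ii+4*ll]       * B[ll+4*jj]; // rows 0..3
//                 C[4*sdc+ii+4*jj] -= A[4*sda+ii+4*ll] * B[ll+4*jj]; // rows 4..7
//                 }
//     }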

    .p2align 4,,15
#if defined(OS_LINUX)
    .globl kernel_dger4_sub_8r_lib4
    .type kernel_dger4_sub_8r_lib4, @function
kernel_dger4_sub_8r_lib4:
#elif defined(OS_MAC)
    .globl _kernel_dger4_sub_8r_lib4
_kernel_dger4_sub_8r_lib4:
#elif defined(OS_WINDOWS)
    .globl kernel_dger4_sub_8r_lib4
    .def kernel_dger4_sub_8r_lib4; .scl 2; .type 32; .endef
kernel_dger4_sub_8r_lib4:
#endif

    PROLOGUE

    movq ARG1, %r10 // k
    movq ARG2, %r11 // A
    movq ARG3, %r12 // sda
    sall $5, %r12d // 4*sda*sizeof(double)
    movq ARG4, %r13 // B
    movq ARG5, %r14 // C
    movq ARG6, %r15 // sdc
    sall $5, %r15d // 4*sdc*sizeof(double)

    cmpl $0, %r10d
    jle 0f // return

    // load block from A
    vmovapd 0(%r11), %ymm0
    vmovapd 32(%r11), %ymm1
    vmovapd 64(%r11), %ymm2
    vmovapd 96(%r11), %ymm3

    vmovapd 0(%r11, %r12, 1), %ymm4
    vmovapd 32(%r11, %r12, 1), %ymm5
    vmovapd 64(%r11, %r12, 1), %ymm6
    vmovapd 96(%r11, %r12, 1), %ymm7

    cmpl $3, %r10d
    jle 2f // cleanup loop

    // main loop
    .p2align 3
1:
    vmovapd 0(%r14), %ymm8
    vmovapd 0(%r14, %r15, 1), %ymm9
    vbroadcastsd 0(%r13), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 8(%r13), %ymm15
    subl $4, %r10d
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 16(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 24(%r13), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, 0(%r14)
    vmovapd %ymm9, 0(%r14, %r15, 1)

    vmovapd 32(%r14), %ymm8
    vmovapd 32(%r14, %r15, 1), %ymm9
    vbroadcastsd 32(%r13), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 40(%r13), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 48(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 56(%r13), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, 32(%r14)
    vmovapd %ymm9, 32(%r14, %r15, 1)

    vmovapd 64(%r14), %ymm8
    vmovapd 64(%r14, %r15, 1), %ymm9
    vbroadcastsd 64(%r13), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 72(%r13), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 80(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 88(%r13), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, 64(%r14)
    vmovapd %ymm9, 64(%r14, %r15, 1)

    vmovapd 96(%r14), %ymm8
    vmovapd 96(%r14, %r15, 1), %ymm9
    vbroadcastsd 96(%r13), %ymm15
    addq $128, %r13
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd -24(%r13), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd -16(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd -8(%r13), %ymm15
    addq $128, %r14
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, -32(%r14)
    vmovapd %ymm9, -32(%r14, %r15, 1)

    cmpl $3, %r10d
    jg 1b // main loop

    cmpl $0, %r10d
    jle 0f // return

    // cleanup loop
2:
    vmovapd 0(%r14), %ymm8
    vmovapd 0(%r14, %r15, 1), %ymm9
    vbroadcastsd 0(%r13), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 8(%r13), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 16(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 24(%r13), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, 0(%r14)
    vmovapd %ymm9, 0(%r14, %r15, 1)

    addq $32, %r13
    addq $32, %r14

    subl $1, %r10d
    cmpl $0, %r10d
    jg 2b // cleanup loop

    // return
0:

    EPILOGUE

    ret

#if defined(OS_LINUX)
    .size kernel_dger4_sub_8r_lib4, .-kernel_dger4_sub_8r_lib4
#endif




//                                  1      2          3        4          5          6        7
// void kernel_dger4_sub_8r_vs_lib4(int k, double *A, int sda, double *B, double *C, int sdc, int km)

    .p2align 4,,15
#if defined(OS_LINUX)
    .globl kernel_dger4_sub_8r_vs_lib4
    .type kernel_dger4_sub_8r_vs_lib4, @function
kernel_dger4_sub_8r_vs_lib4:
#elif defined(OS_MAC)
    .globl _kernel_dger4_sub_8r_vs_lib4
_kernel_dger4_sub_8r_vs_lib4:
#elif defined(OS_WINDOWS)
    .globl kernel_dger4_sub_8r_vs_lib4
    .def kernel_dger4_sub_8r_vs_lib4; .scl 2; .type 32; .endef
kernel_dger4_sub_8r_vs_lib4:
#endif

    PROLOGUE

    movq ARG1, %r10 // k
    movq ARG2, %r11 // A
    movq ARG3, %r12 // sda
    sall $5, %r12d // 4*sda*sizeof(double)
    movq ARG4, %r13 // B
    movq ARG5, %r14 // C
    movq ARG6, %r15 // sdc
    sall $5, %r15d // 4*sdc*sizeof(double)
    movq ARG7, %rax // km

    cmpl $0, %r10d
    jle 0f // return

    vcvtsi2sd %eax, %xmm15, %xmm15
#if defined(OS_LINUX) || defined(OS_WINDOWS)
    vmovupd .LC01(%rip), %ymm14
#elif defined(OS_MAC)
    vmovupd LC01(%rip), %ymm14
#endif
    vmovddup %xmm15, %xmm15
    vinsertf128 $1, %xmm15, %ymm15, %ymm15
    vsubpd %ymm15, %ymm14, %ymm15
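    // ymm15 = LC01 - km = { 4.5-km, 5.5-km, 6.5-km, 7.5-km }: lane i has its
    // sign bit set exactly when row 4+i < km, so the vmaskmovpd loads below
    // fetch only the valid rows of the second panel of A and zero the others.
    // Zeroed A lanes turn the multiply-subtract into a no-op for rows >= km,
    // which is why C can still be loaded and stored full width.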

    // load block from A
    vmovapd 0(%r11), %ymm0
    vmovapd 32(%r11), %ymm1
    vmovapd 64(%r11), %ymm2
    vmovapd 96(%r11), %ymm3

    vmaskmovpd 0(%r11, %r12, 1), %ymm15, %ymm4
    vmaskmovpd 32(%r11, %r12, 1), %ymm15, %ymm5
    vmaskmovpd 64(%r11, %r12, 1), %ymm15, %ymm6
    vmaskmovpd 96(%r11, %r12, 1), %ymm15, %ymm7

    cmpl $3, %r10d
    jle 2f // cleanup loop

    // main loop
    .p2align 3
1:
    vmovapd 0(%r14), %ymm8
    vmovapd 0(%r14, %r15, 1), %ymm9
    vbroadcastsd 0(%r13), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 8(%r13), %ymm15
    subl $4, %r10d
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 16(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 24(%r13), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, 0(%r14)
    vmovapd %ymm9, 0(%r14, %r15, 1)

    vmovapd 32(%r14), %ymm8
    vmovapd 32(%r14, %r15, 1), %ymm9
    vbroadcastsd 32(%r13), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 40(%r13), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 48(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 56(%r13), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, 32(%r14)
    vmovapd %ymm9, 32(%r14, %r15, 1)

    vmovapd 64(%r14), %ymm8
    vmovapd 64(%r14, %r15, 1), %ymm9
    vbroadcastsd 64(%r13), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 72(%r13), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 80(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 88(%r13), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, 64(%r14)
    vmovapd %ymm9, 64(%r14, %r15, 1)

    vmovapd 96(%r14), %ymm8
    vmovapd 96(%r14, %r15, 1), %ymm9
    vbroadcastsd 96(%r13), %ymm15
    addq $128, %r13
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd -24(%r13), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd -16(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd -8(%r13), %ymm15
    addq $128, %r14
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, -32(%r14)
    vmovapd %ymm9, -32(%r14, %r15, 1)

    cmpl $3, %r10d
    jg 1b // main loop

    cmpl $0, %r10d
    jle 0f // return

    // cleanup loop
2:
    vmovapd 0(%r14), %ymm8
    vmovapd 0(%r14, %r15, 1), %ymm9
    vbroadcastsd 0(%r13), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm4, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 8(%r13), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm5, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 16(%r13), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm6, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vbroadcastsd 24(%r13), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm8, %ymm8
    vmulpd %ymm7, %ymm15, %ymm14
    vsubpd %ymm14, %ymm9, %ymm9
    vmovapd %ymm8, 0(%r14)
    vmovapd %ymm9, 0(%r14, %r15, 1)

    addq $32, %r13
    addq $32, %r14

    subl $1, %r10d
    cmpl $0, %r10d
    jg 2b // cleanup loop

    // return
0:

    EPILOGUE

    ret

#if defined(OS_LINUX)
    .size kernel_dger4_sub_8r_vs_lib4, .-kernel_dger4_sub_8r_vs_lib4
#endif




//                               1      2          3          4
// void kernel_dger4_sub_4r_lib4(int n, double *A, double *B, double *C)
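//
// Same update as the 8-row kernel above, restricted to a single 4-row panel:
// C[0:4,0:n] -= A[0:4,0:4] * B[0:4,0:n]. A minimal C reference sketch (the
// _ref name is hypothetical), kept in a comment so the file still assembles:
//
// void kernel_dger4_sub_4r_lib4_ref(int n, double *A, double *B, double *C)
//     {
//     int ii, jj, ll;
//     for(jj=0; jj<n; jj++)
//         for(ii=0; ii<4; ii++)
//             for(ll=0; ll<4; ll++)
//                 C[ii+4*jj] -= A[ii+4*ll] * B[ll+4*jj];
//     }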

    .p2align 4,,15
#if defined(OS_LINUX)
    .globl kernel_dger4_sub_4r_lib4
    .type kernel_dger4_sub_4r_lib4, @function
kernel_dger4_sub_4r_lib4:
#elif defined(OS_MAC)
    .globl _kernel_dger4_sub_4r_lib4
_kernel_dger4_sub_4r_lib4:
#elif defined(OS_WINDOWS)
    .globl kernel_dger4_sub_4r_lib4
    .def kernel_dger4_sub_4r_lib4; .scl 2; .type 32; .endef
kernel_dger4_sub_4r_lib4:
#endif

    PROLOGUE

    movq ARG1, %r10 // n
    movq ARG2, %r11 // A
    movq ARG3, %r12 // B
    movq ARG4, %r13 // C

    cmpl $0, %r10d
    jle 0f // return

    // load block from A
    vmovapd 0(%r11), %ymm0
    vmovapd 32(%r11), %ymm1
    vmovapd 64(%r11), %ymm2
    vmovapd 96(%r11), %ymm3

    cmpl $3, %r10d
    jle 2f // cleanup loop

    // main loop
    .p2align 3
1:
    vmovapd 0(%r13), %ymm4
    vbroadcastsd 0(%r12), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 8(%r12), %ymm15
    subl $4, %r10d
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 16(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 24(%r12), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, 0(%r13)

    vmovapd 32(%r13), %ymm4
    vbroadcastsd 32(%r12), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 40(%r12), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 48(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 56(%r12), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, 32(%r13)

    vmovapd 64(%r13), %ymm4
    vbroadcastsd 64(%r12), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 72(%r12), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 80(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 88(%r12), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, 64(%r13)

    vmovapd 96(%r13), %ymm4
    vbroadcastsd 96(%r12), %ymm15
    addq $128, %r12
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd -24(%r12), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd -16(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd -8(%r12), %ymm15
    addq $128, %r13
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, -32(%r13)

    cmpl $3, %r10d
    jg 1b // main loop

    cmpl $0, %r10d
    jle 0f // return

    // cleanup loop
2:
    vmovapd 0(%r13), %ymm4
    vbroadcastsd 0(%r12), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 8(%r12), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 16(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 24(%r12), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, 0(%r13)

    addq $32, %r12
    addq $32, %r13

    subl $1, %r10d
    cmpl $0, %r10d
    jg 2b // cleanup loop

    // return
0:

    EPILOGUE

    ret

#if defined(OS_LINUX)
    .size kernel_dger4_sub_4r_lib4, .-kernel_dger4_sub_4r_lib4
#endif




//                                  1      2          3          4          5
// void kernel_dger4_sub_4r_vs_lib4(int n, double *A, double *B, double *C, int km)

    .p2align 4,,15
#if defined(OS_LINUX)
    .globl kernel_dger4_sub_4r_vs_lib4
    .type kernel_dger4_sub_4r_vs_lib4, @function
kernel_dger4_sub_4r_vs_lib4:
#elif defined(OS_MAC)
    .globl _kernel_dger4_sub_4r_vs_lib4
_kernel_dger4_sub_4r_vs_lib4:
#elif defined(OS_WINDOWS)
    .globl kernel_dger4_sub_4r_vs_lib4
    .def kernel_dger4_sub_4r_vs_lib4; .scl 2; .type 32; .endef
kernel_dger4_sub_4r_vs_lib4:
#endif

    PROLOGUE

    movq ARG1, %r10 // n
    movq ARG2, %r11 // A
    movq ARG3, %r12 // B
    movq ARG4, %r13 // C
    movq ARG5, %r14 // km

    cmpl $0, %r10d
    jle 0f // return

    vcvtsi2sd %r14d, %xmm15, %xmm15
#if defined(OS_LINUX) || defined(OS_WINDOWS)
    vmovupd .LC00(%rip), %ymm14
#elif defined(OS_MAC)
    vmovupd LC00(%rip), %ymm14
#endif
    vmovddup %xmm15, %xmm15
    vinsertf128 $1, %xmm15, %ymm15, %ymm15
    vsubpd %ymm15, %ymm14, %ymm15
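    // Same masking idea as in kernel_dger4_sub_8r_vs_lib4, now with LC00 =
    // { 0.5, 1.5, 2.5, 3.5 }: lane i of the mask selects row i when i < km.
    // Masked-out rows of A load as zero, so the full-width stores to C leave
    // rows >= km unchanged.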

    // load block from A
    vmaskmovpd 0(%r11), %ymm15, %ymm0
    vmaskmovpd 32(%r11), %ymm15, %ymm1
    vmaskmovpd 64(%r11), %ymm15, %ymm2
    vmaskmovpd 96(%r11), %ymm15, %ymm3

    cmpl $3, %r10d
    jle 2f // cleanup loop

    // main loop
    .p2align 3
1:
    vmovapd 0(%r13), %ymm4
    vbroadcastsd 0(%r12), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 8(%r12), %ymm15
    subl $4, %r10d
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 16(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 24(%r12), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, 0(%r13)

    vmovapd 32(%r13), %ymm4
    vbroadcastsd 32(%r12), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 40(%r12), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 48(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 56(%r12), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, 32(%r13)

    vmovapd 64(%r13), %ymm4
    vbroadcastsd 64(%r12), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 72(%r12), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 80(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 88(%r12), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, 64(%r13)

    vmovapd 96(%r13), %ymm4
    vbroadcastsd 96(%r12), %ymm15
    addq $128, %r12
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd -24(%r12), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd -16(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd -8(%r12), %ymm15
    addq $128, %r13
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, -32(%r13)

    cmpl $3, %r10d
    jg 1b // main loop

    cmpl $0, %r10d
    jle 0f // return

    // cleanup loop
2:
    vmovapd 0(%r13), %ymm4
    vbroadcastsd 0(%r12), %ymm15
    vmulpd %ymm0, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 8(%r12), %ymm15
    vmulpd %ymm1, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 16(%r12), %ymm15
    vmulpd %ymm2, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vbroadcastsd 24(%r12), %ymm15
    vmulpd %ymm3, %ymm15, %ymm14
    vsubpd %ymm14, %ymm4, %ymm4
    vmovapd %ymm4, 0(%r13)

    addq $32, %r12
    addq $32, %r13

    subl $1, %r10d
    cmpl $0, %r10d
    jg 2b // cleanup loop

    // return
0:

    EPILOGUE

    ret

#if defined(OS_LINUX)
    .size kernel_dger4_sub_4r_vs_lib4, .-kernel_dger4_sub_4r_vs_lib4
#endif




// read-only data
#if defined(OS_LINUX)
    .section .rodata.cst32,"aM",@progbits,32
#elif defined(OS_MAC)
    .section __TEXT,__const
#elif defined(OS_WINDOWS)
    .section .rdata,"dr"
#endif

#if defined(OS_LINUX) || defined(OS_WINDOWS)
    .align 32
.LC00: // { 0.5, 1.5, 2.5, 3.5 }
#elif defined(OS_MAC)
    .align 5
LC00: // { 0.5, 1.5, 2.5, 3.5 }
#endif
    .double 0.5
    .double 1.5
    .double 2.5
    .double 3.5

#if defined(OS_LINUX) || defined(OS_WINDOWS)
    .align 32
.LC01: // { 4.5, 5.5, 6.5, 7.5 }
#elif defined(OS_MAC)
    .align 5
LC01: // { 4.5, 5.5, 6.5, 7.5 }
#endif
    .double 4.5
    .double 5.5
    .double 6.5
    .double 7.5

#if defined(OS_LINUX) || defined(OS_WINDOWS)
    .align 32
.LC02: // { 8.5, 9.5, 10.5, 11.5 }
#elif defined(OS_MAC)
    .align 5
LC02: // { 8.5, 9.5, 10.5, 11.5 }
#endif
    .double 8.5
    .double 9.5
    .double 10.5
    .double 11.5




#if defined(OS_LINUX)
    .section .note.GNU-stack,"",@progbits
#elif defined(OS_MAC)
    .subsections_via_symbols
#endif