/* zhbmv.f -- translated by f2c (version 20100827).
   You must link the resulting object file with libf2c:
        on Microsoft Windows system, link with libf2c.lib;
        on Linux or Unix systems, link with .../path/to/libf2c.a -lm
        or, if you install libf2c.a in a standard place, with -lf2c -lm
        -- in that order, at the end of the command line, as in
                cc *.o -lf2c -lm
   Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,

                http://www.netlib.org/f2c/libf2c.zip
*/

#include "datatypes.h"

/* Subroutine */ int zhbmv_(char *uplo, integer *n, integer *k,
        doublecomplex *alpha, doublecomplex *a, integer *lda,
        doublecomplex *x, integer *incx, doublecomplex *beta,
        doublecomplex *y, integer *incy, ftnlen uplo_len)
{
    /* System generated locals */
    integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5;
    doublereal d__1;
    doublecomplex z__1, z__2, z__3, z__4;

    /* Builtin functions */
    void d_cnjg(doublecomplex *, doublecomplex *);

    /* Local variables */
    integer i__, j, l, ix, iy, jx, jy, kx, ky, info;
    doublecomplex temp1, temp2;
    extern logical lsame_(char *, char *, ftnlen, ftnlen);
    integer kplus1;
    extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);

/*     .. Scalar Arguments .. */
/*     .. */
/*     .. Array Arguments .. */
/*     .. */

/*  Purpose */
/*  ======= */

/*  ZHBMV performs the matrix-vector operation */

/*     y := alpha*A*x + beta*y, */

/*  where alpha and beta are scalars, x and y are n element vectors and */
/*  A is an n by n hermitian band matrix, with k super-diagonals. */

/*  Arguments */
/*  ========== */

/*  UPLO   - CHARACTER*1. */
/*           On entry, UPLO specifies whether the upper or lower */
/*           triangular part of the band matrix A is being supplied as */
/*           follows: */

/*              UPLO = 'U' or 'u'   The upper triangular part of A is */
/*                                  being supplied. */

/*              UPLO = 'L' or 'l'   The lower triangular part of A is */
/*                                  being supplied. */

/*           Unchanged on exit. */

/*  N      - INTEGER. */
/*           On entry, N specifies the order of the matrix A. */
/*           N must be at least zero. */
/*           Unchanged on exit. */

/*  K      - INTEGER. */
/*           On entry, K specifies the number of super-diagonals of the */
/*           matrix A. K must satisfy 0 .le. K. */
/*           Unchanged on exit. */

/*  ALPHA  - COMPLEX*16. */
/*           On entry, ALPHA specifies the scalar alpha. */
/*           Unchanged on exit. */

/*  A      - COMPLEX*16 array of DIMENSION ( LDA, n ). */
/*           Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */
/*           by n part of the array A must contain the upper triangular */
/*           band part of the hermitian matrix, supplied column by */
/*           column, with the leading diagonal of the matrix in row */
/*           ( k + 1 ) of the array, the first super-diagonal starting at */
/*           position 2 in row k, and so on. The top left k by k triangle */
/*           of the array A is not referenced. */
/*           The following program segment will transfer the upper */
/*           triangular part of a hermitian band matrix from conventional */
/*           full matrix storage to band storage: */

/*                 DO 20, J = 1, N */
/*                    M = K + 1 - J */
/*                    DO 10, I = MAX( 1, J - K ), J */
/*                       A( M + I, J ) = matrix( I, J ) */
/*              10    CONTINUE */
/*              20 CONTINUE */

/*           Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */
/*           by n part of the array A must contain the lower triangular */
/*           band part of the hermitian matrix, supplied column by */
/*           column, with the leading diagonal of the matrix in row 1 of */
/*           the array, the first sub-diagonal starting at position 1 in */
/*           row 2, and so on. The bottom right k by k triangle of the */
/*           array A is not referenced. */
/*           The following program segment will transfer the lower */
/*           triangular part of a hermitian band matrix from conventional */
/*           full matrix storage to band storage: */

/*                 DO 20, J = 1, N */
/*                    M = 1 - J */
/*                    DO 10, I = J, MIN( N, J + K ) */
/*                       A( M + I, J ) = matrix( I, J ) */
/*              10    CONTINUE */
/*              20 CONTINUE */

/*           Note that the imaginary parts of the diagonal elements need */
/*           not be set and are assumed to be zero. */
/*           Unchanged on exit. */
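
/*           Equivalently, for a C caller using 0-based, column-major */
/*           indexing: with UPLO = 'U', matrix element (i,j) with */
/*           max(0,j-k) <= i <= j is stored at a[(k + i - j) + j*lda], */
/*           and with UPLO = 'L', element (i,j) with j <= i <= min(n-1,j+k) */
/*           is stored at a[(i - j) + j*lda]. */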

/*  LDA    - INTEGER. */
/*           On entry, LDA specifies the first dimension of A as declared */
/*           in the calling (sub) program. LDA must be at least */
/*           ( k + 1 ). */
/*           Unchanged on exit. */

/*  X      - COMPLEX*16 array of DIMENSION at least */
/*           ( 1 + ( n - 1 )*abs( INCX ) ). */
/*           Before entry, the incremented array X must contain the */
/*           vector x. */
/*           Unchanged on exit. */

/*  INCX   - INTEGER. */
/*           On entry, INCX specifies the increment for the elements of */
/*           X. INCX must not be zero. */
/*           Unchanged on exit. */

/*  BETA   - COMPLEX*16. */
/*           On entry, BETA specifies the scalar beta. */
/*           Unchanged on exit. */

/*  Y      - COMPLEX*16 array of DIMENSION at least */
/*           ( 1 + ( n - 1 )*abs( INCY ) ). */
/*           Before entry, the incremented array Y must contain the */
/*           vector y. On exit, Y is overwritten by the updated vector y. */

/*  INCY   - INTEGER. */
/*           On entry, INCY specifies the increment for the elements of */
/*           Y. INCY must not be zero. */
/*           Unchanged on exit. */

/*  Further Details */
/*  =============== */

/*  Level 2 Blas routine. */

/*  -- Written on 22-October-1986. */
/*     Jack Dongarra, Argonne National Lab. */
/*     Jeremy Du Croz, Nag Central Office. */
/*     Sven Hammarling, Nag Central Office. */
/*     Richard Hanson, Sandia National Labs. */

/*  ===================================================================== */

/*     .. Parameters .. */
/*     .. */
/*     .. Local Scalars .. */
/*     .. */
/*     .. External Functions .. */
/*     .. */
/*     .. External Subroutines .. */
/*     .. */
/*     .. Intrinsic Functions .. */
/*     .. */

/*     Test the input parameters. */

    /* Parameter adjustments */
    a_dim1 = *lda;
    a_offset = 1 + a_dim1;
    a -= a_offset;
    --x;
    --y;

    /* Function Body */
    info = 0;
    if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) &&
            ! lsame_(uplo, "L", (ftnlen)1, (ftnlen)1)) {
        info = 1;
    } else if (*n < 0) {
        info = 2;
    } else if (*k < 0) {
        info = 3;
    } else if (*lda < *k + 1) {
        info = 6;
    } else if (*incx == 0) {
        info = 8;
    } else if (*incy == 0) {
        info = 11;
    }
    if (info != 0) {
        xerbla_("ZHBMV ", &info, (ftnlen)6);
        return 0;
    }

/*     Quick return if possible. */

    if (*n == 0 || (alpha->r == 0. && alpha->i == 0. &&
            (beta->r == 1. && beta->i == 0.))) {
        return 0;
    }

/*     Set up the start points in X and Y. */

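/*     A negative increment walks its vector backwards, so the start point */
/*     is the last element, at position 1 - ( n - 1 )*inc. */
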
    if (*incx > 0) {
        kx = 1;
    } else {
        kx = 1 - (*n - 1) * *incx;
    }
    if (*incy > 0) {
        ky = 1;
    } else {
        ky = 1 - (*n - 1) * *incy;
    }

/*     Start the operations. In this version the elements of the array A */
/*     are accessed sequentially with one pass through A. */

/*     First form y := beta*y. */

    if (beta->r != 1. || beta->i != 0.) {
        if (*incy == 1) {
            if (beta->r == 0. && beta->i == 0.) {
                i__1 = *n;
                for (i__ = 1; i__ <= i__1; ++i__) {
                    i__2 = i__;
                    y[i__2].r = 0., y[i__2].i = 0.;
/* L10: */
                }
            } else {
                i__1 = *n;
                for (i__ = 1; i__ <= i__1; ++i__) {
                    i__2 = i__;
                    i__3 = i__;
                    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
                            z__1.i = beta->r * y[i__3].i + beta->i * y[i__3].r;
                    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
/* L20: */
                }
            }
        } else {
            iy = ky;
            if (beta->r == 0. && beta->i == 0.) {
                i__1 = *n;
                for (i__ = 1; i__ <= i__1; ++i__) {
                    i__2 = iy;
                    y[i__2].r = 0., y[i__2].i = 0.;
                    iy += *incy;
/* L30: */
                }
            } else {
                i__1 = *n;
                for (i__ = 1; i__ <= i__1; ++i__) {
                    i__2 = iy;
                    i__3 = iy;
                    z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i,
                            z__1.i = beta->r * y[i__3].i + beta->i * y[i__3].r;
                    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
                    iy += *incy;
/* L40: */
                }
            }
        }
    }
    if (alpha->r == 0. && alpha->i == 0.) {
        return 0;
    }
    if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) {

/*        Form y when upper triangle of A is stored. */

        kplus1 = *k + 1;
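
/*        The offset l = kplus1 - j (set inside the j loop below) maps the */
/*        matrix row index i to storage row l + i, so A(i,j) is read from */
/*        a[l + i__ + j * a_dim1] and the diagonal A(j,j) sits in row */
/*        kplus1 of the array. */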
        if (*incx == 1 && *incy == 1) {
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                i__2 = j;
                z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i,
                        z__1.i = alpha->r * x[i__2].i + alpha->i * x[i__2].r;
                temp1.r = z__1.r, temp1.i = z__1.i;
                temp2.r = 0., temp2.i = 0.;
                l = kplus1 - j;
/* Computing MAX */
                i__2 = 1, i__3 = j - *k;
                i__4 = j - 1;
                for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) {
                    i__2 = i__;
                    i__3 = i__;
                    i__5 = l + i__ + j * a_dim1;
                    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
                            z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5].r;
                    z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i;
                    y[i__2].r = z__1.r, y[i__2].i = z__1.i;
                    d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
                    i__2 = i__;
                    z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i,
                            z__2.i = z__3.r * x[i__2].i + z__3.i * x[i__2].r;
                    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
                    temp2.r = z__1.r, temp2.i = z__1.i;
/* L50: */
                }
                i__4 = j;
                i__2 = j;
                i__3 = kplus1 + j * a_dim1;
                d__1 = a[i__3].r;
                z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
                z__2.r = y[i__2].r + z__3.r, z__2.i = y[i__2].i + z__3.i;
                z__4.r = alpha->r * temp2.r - alpha->i * temp2.i,
                        z__4.i = alpha->r * temp2.i + alpha->i * temp2.r;
                z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
                y[i__4].r = z__1.r, y[i__4].i = z__1.i;
/* L60: */
            }
        } else {
            jx = kx;
            jy = ky;
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                i__4 = jx;
                z__1.r = alpha->r * x[i__4].r - alpha->i * x[i__4].i,
                        z__1.i = alpha->r * x[i__4].i + alpha->i * x[i__4].r;
                temp1.r = z__1.r, temp1.i = z__1.i;
                temp2.r = 0., temp2.i = 0.;
                ix = kx;
                iy = ky;
                l = kplus1 - j;
/* Computing MAX */
                i__4 = 1, i__2 = j - *k;
                i__3 = j - 1;
                for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) {
                    i__4 = iy;
                    i__2 = iy;
                    i__5 = l + i__ + j * a_dim1;
                    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
                            z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5].r;
                    z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i;
                    y[i__4].r = z__1.r, y[i__4].i = z__1.i;
                    d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
                    i__4 = ix;
                    z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
                            z__2.i = z__3.r * x[i__4].i + z__3.i * x[i__4].r;
                    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
                    temp2.r = z__1.r, temp2.i = z__1.i;
                    ix += *incx;
                    iy += *incy;
/* L70: */
                }
                i__3 = jy;
                i__4 = jy;
                i__2 = kplus1 + j * a_dim1;
                d__1 = a[i__2].r;
                z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i;
                z__2.r = y[i__4].r + z__3.r, z__2.i = y[i__4].i + z__3.i;
                z__4.r = alpha->r * temp2.r - alpha->i * temp2.i,
                        z__4.i = alpha->r * temp2.i + alpha->i * temp2.r;
                z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
                y[i__3].r = z__1.r, y[i__3].i = z__1.i;
                jx += *incx;
                jy += *incy;
                if (j > *k) {
                    kx += *incx;
                    ky += *incy;
                }
/* L80: */
            }
        }
    } else {

/*        Form y when lower triangle of A is stored. */

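/*        Here the offset l = 1 - j maps the matrix row index i to storage */
/*        row l + i, so the diagonal A(j,j) is held in row 1 of the array */
/*        and the sub-diagonals of column j follow it down that column. */
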
        if (*incx == 1 && *incy == 1) {
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                i__3 = j;
                z__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i,
                        z__1.i = alpha->r * x[i__3].i + alpha->i * x[i__3].r;
                temp1.r = z__1.r, temp1.i = z__1.i;
                temp2.r = 0., temp2.i = 0.;
                i__3 = j;
                i__4 = j;
                i__2 = j * a_dim1 + 1;
                d__1 = a[i__2].r;
                z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
                z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
                y[i__3].r = z__1.r, y[i__3].i = z__1.i;
                l = 1 - j;
/* Computing MIN */
                i__4 = *n, i__2 = j + *k;
                i__3 = min(i__4,i__2);
                for (i__ = j + 1; i__ <= i__3; ++i__) {
                    i__4 = i__;
                    i__2 = i__;
                    i__5 = l + i__ + j * a_dim1;
                    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
                            z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5].r;
                    z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i;
                    y[i__4].r = z__1.r, y[i__4].i = z__1.i;
                    d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
                    i__4 = i__;
                    z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
                            z__2.i = z__3.r * x[i__4].i + z__3.i * x[i__4].r;
                    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
                    temp2.r = z__1.r, temp2.i = z__1.i;
/* L90: */
                }
                i__3 = j;
                i__4 = j;
                z__2.r = alpha->r * temp2.r - alpha->i * temp2.i,
                        z__2.i = alpha->r * temp2.i + alpha->i * temp2.r;
                z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
                y[i__3].r = z__1.r, y[i__3].i = z__1.i;
/* L100: */
            }
        } else {
            jx = kx;
            jy = ky;
            i__1 = *n;
            for (j = 1; j <= i__1; ++j) {
                i__3 = jx;
                z__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i,
                        z__1.i = alpha->r * x[i__3].i + alpha->i * x[i__3].r;
                temp1.r = z__1.r, temp1.i = z__1.i;
                temp2.r = 0., temp2.i = 0.;
                i__3 = jy;
                i__4 = jy;
                i__2 = j * a_dim1 + 1;
                d__1 = a[i__2].r;
                z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i;
                z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
                y[i__3].r = z__1.r, y[i__3].i = z__1.i;
                l = 1 - j;
                ix = jx;
                iy = jy;
/* Computing MIN */
                i__4 = *n, i__2 = j + *k;
                i__3 = min(i__4,i__2);
                for (i__ = j + 1; i__ <= i__3; ++i__) {
                    ix += *incx;
                    iy += *incy;
                    i__4 = iy;
                    i__2 = iy;
                    i__5 = l + i__ + j * a_dim1;
                    z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i,
                            z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5].r;
                    z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i;
                    y[i__4].r = z__1.r, y[i__4].i = z__1.i;
                    d_cnjg(&z__3, &a[l + i__ + j * a_dim1]);
                    i__4 = ix;
                    z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i,
                            z__2.i = z__3.r * x[i__4].i + z__3.i * x[i__4].r;
                    z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i;
                    temp2.r = z__1.r, temp2.i = z__1.i;
/* L110: */
                }
                i__3 = jy;
                i__4 = jy;
                z__2.r = alpha->r * temp2.r - alpha->i * temp2.i,
                        z__2.i = alpha->r * temp2.i + alpha->i * temp2.r;
                z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i;
                y[i__3].r = z__1.r, y[i__3].i = z__1.i;
                jx += *incx;
                jy += *incy;
/* L120: */
            }
        }
    }

    return 0;

/*     End of ZHBMV . */

} /* zhbmv_ */
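
/* Minimal usage sketch (illustrative only; the guard macro and the test */
/* values below are assumptions, not part of the translated routine). It */
/* relies on the f2c types from "datatypes.h" and is compiled only when */
/* ZHBMV_EXAMPLE_MAIN is defined, so a normal library build is unaffected. */
#ifdef ZHBMV_EXAMPLE_MAIN
#include <stdio.h>

int main(void)
{
    /* 2 x 2 Hermitian band matrix with one super-diagonal (k = 1), stored
       in upper band form with lda = k + 1 = 2:
           A = [  2    1+i ]
               [ 1-i    3  ]                                              */
    doublecomplex a[4];
    doublecomplex x[2] = { {1., 0.}, {1., 0.} };
    doublecomplex y[2] = { {0., 0.}, {0., 0.} };
    doublecomplex alpha = {1., 0.}, beta = {0., 0.};
    integer n = 2, k = 1, lda = 2, incx = 1, incy = 1;

    a[0].r = 0.; a[0].i = 0.;   /* a(1,1): top left k x k triangle, not referenced */
    a[1].r = 2.; a[1].i = 0.;   /* a(2,1) = A(1,1) = 2   */
    a[2].r = 1.; a[2].i = 1.;   /* a(1,2) = A(1,2) = 1+i */
    a[3].r = 3.; a[3].i = 0.;   /* a(2,2) = A(2,2) = 3   */

    /* y := alpha*A*x + beta*y; the expected result is y = (3+i, 4-i). */
    zhbmv_("U", &n, &k, &alpha, a, &lda, x, &incx, &beta, y, &incy, (ftnlen)1);

    printf("y = (%g%+gi, %g%+gi)\n", y[0].r, y[0].i, y[1].r, y[1].i);
    return 0;
}
#endif /* ZHBMV_EXAMPLE_MAIN */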