slaqr5.c
00001 /* slaqr5.f -- translated by f2c (version 20061008).
00002    You must link the resulting object file with libf2c:
00003         on Microsoft Windows system, link with libf2c.lib;
00004         on Linux or Unix systems, link with .../path/to/libf2c.a -lm
00005         or, if you install libf2c.a in a standard place, with -lf2c -lm
00006         -- in that order, at the end of the command line, as in
00007                 cc *.o -lf2c -lm
00008         Source for libf2c is in /netlib/f2c/libf2c.zip, e.g.,
00009 
00010                 http://www.netlib.org/f2c/libf2c.zip
00011 */
00012 
00013 #include "f2c.h"
00014 #include "blaswrap.h"
00015 
00016 /* Table of constant values */
00017 
00018 static real c_b7 = 0.f;
00019 static real c_b8 = 1.f;
00020 static integer c__3 = 3;
00021 static integer c__1 = 1;
00022 static integer c__2 = 2;
00023 
00024 /* Subroutine */ int slaqr5_(logical *wantt, logical *wantz, integer *kacc22, 
00025         integer *n, integer *ktop, integer *kbot, integer *nshfts, real *sr, 
00026         real *si, real *h__, integer *ldh, integer *iloz, integer *ihiz, real 
00027         *z__, integer *ldz, real *v, integer *ldv, real *u, integer *ldu, 
00028         integer *nv, real *wv, integer *ldwv, integer *nh, real *wh, integer *
00029         ldwh)
00030 {
00031     /* System generated locals */
00032     integer h_dim1, h_offset, u_dim1, u_offset, v_dim1, v_offset, wh_dim1, 
00033             wh_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3,
00034              i__4, i__5, i__6, i__7;
00035     real r__1, r__2, r__3, r__4, r__5;
00036 
00037     /* Local variables */
00038     integer i__, j, k, m, i2, j2, i4, j4, k1;
00039     real h11, h12, h21, h22;
00040     integer m22, ns, nu;
00041     real vt[3], scl;
00042     integer kdu, kms;
00043     real ulp;
00044     integer knz, kzs;
00045     real tst1, tst2, beta;
00046     logical blk22, bmp22;
00047     integer mend, jcol, jlen, jbot, mbot;
00048     real swap;
00049     integer jtop, jrow, mtop;
00050     real alpha;
00051     logical accum;
00052     integer ndcol, incol;
00053     extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *, 
00054             integer *, real *, real *, integer *, real *, integer *, real *, 
00055             real *, integer *);
00056     integer krcol, nbmps;
00057     extern /* Subroutine */ int strmm_(char *, char *, char *, char *, 
00058             integer *, integer *, real *, real *, integer *, real *, integer *
00059 ), slaqr1_(integer *, real *, 
00060             integer *, real *, real *, real *, real *, real *), slabad_(real *
00061 , real *);
00062     extern doublereal slamch_(char *);
00063     real safmin;
00064     extern /* Subroutine */ int slarfg_(integer *, real *, real *, integer *, 
00065             real *);
00066     real safmax;
00067     extern /* Subroutine */ int slacpy_(char *, integer *, integer *, real *, 
00068             integer *, real *, integer *), slaset_(char *, integer *, 
00069             integer *, real *, real *, real *, integer *);
00070     real refsum;
00071     integer mstart;
00072     real smlnum;
00073 
00074 
00075 /*  -- LAPACK auxiliary routine (version 3.2) -- */
00076 /*     Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */
00077 /*     November 2006 */
00078 
00079 /*     .. Scalar Arguments .. */
00080 /*     .. */
00081 /*     .. Array Arguments .. */
00082 /*     .. */
00083 
00084 /*     This auxiliary subroutine called by SLAQR0 performs a */
00085 /*     single small-bulge multi-shift QR sweep. */
00086 
00087 /*      WANTT  (input) logical scalar */
00088 /*             WANTT = .true. if the quasi-triangular Schur factor */
00089 /*             is being computed.  WANTT is set to .false. otherwise. */
00090 
00091 /*      WANTZ  (input) logical scalar */
00092 /*             WANTZ = .true. if the orthogonal Schur factor is being */
00093 /*             computed.  WANTZ is set to .false. otherwise. */
00094 
00095 /*      KACC22 (input) integer with value 0, 1, or 2. */
00096 /*             Specifies the computation mode of far-from-diagonal */
00097 /*             orthogonal updates. */
00098 /*        = 0: SLAQR5 does not accumulate reflections and does not */
00099 /*             use matrix-matrix multiply to update far-from-diagonal */
00100 /*             matrix entries. */
00101 /*        = 1: SLAQR5 accumulates reflections and uses matrix-matrix */
00102 /*             multiply to update the far-from-diagonal matrix entries. */
00103 /*        = 2: SLAQR5 accumulates reflections, uses matrix-matrix */
00104 /*             multiply to update the far-from-diagonal matrix entries, */
00105 /*             and takes advantage of 2-by-2 block structure during */
00106 /*             matrix multiplies. */
00107 
00108 /*      N      (input) integer scalar */
00109 /*             N is the order of the Hessenberg matrix H upon which this */
00110 /*             subroutine operates. */
00111 
00112 /*      KTOP   (input) integer scalar */
00113 /*      KBOT   (input) integer scalar */
00114 /*             These are the first and last rows and columns of an */
00115 /*             isolated diagonal block upon which the QR sweep is to be */
00116 /*             applied. It is assumed without a check that */
00117 /*                       either KTOP = 1  or   H(KTOP,KTOP-1) = 0 */
00118 /*             and */
00119 /*                       either KBOT = N  or   H(KBOT+1,KBOT) = 0. */
00120 
00121 /*      NSHFTS (input) integer scalar */
00122 /*             NSHFTS gives the number of simultaneous shifts.  NSHFTS */
00123 /*             must be positive and even. */
00124 
00125 /*      SR     (input/output) REAL array of size (NSHFTS) */
00126 /*      SI     (input/output) REAL array of size (NSHFTS) */
00127 /*             SR contains the real parts and SI contains the imaginary */
00128 /*             parts of the NSHFTS shifts of origin that define the */
00129 /*             multi-shift QR sweep.  On output SR and SI may be */
00130 /*             reordered. */
00131 
00132 /*      H      (input/output) REAL array of size (LDH,N) */
00133 /*             On input H contains a Hessenberg matrix.  On output a */
00134 /*             multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied */
00135 /*             to the isolated diagonal block in rows and columns KTOP */
00136 /*             through KBOT. */
00137 
00138 /*      LDH    (input) integer scalar */
00139 /*             LDH is the leading dimension of H just as declared in the */
00140 /*             calling procedure.  LDH.GE.MAX(1,N). */
00141 
00142 /*      ILOZ   (input) INTEGER */
00143 /*      IHIZ   (input) INTEGER */
00144 /*             Specify the rows of Z to which transformations must be */
00145 /*             applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N */
00146 
00147 /*      Z      (input/output) REAL array of size (LDZ,IHIZ) */
00148 /*             If WANTZ = .TRUE., then the QR Sweep orthogonal */
00149 /*             similarity transformation is accumulated into */
00150 /*             Z(ILOZ:IHIZ,ILOZ:IHIZ) from the right. */
00151 /*             If WANTZ = .FALSE., then Z is unreferenced. */
00152 
00153 /*      LDZ    (input) integer scalar */
00154 /*             LDZ is the leading dimension of Z just as declared in */
00155 /*             the calling procedure. LDZ.GE.N. */
00156 
00157 /*      V      (workspace) REAL array of size (LDV,NSHFTS/2) */
00158 
00159 /*      LDV    (input) integer scalar */
00160 /*             LDV is the leading dimension of V as declared in the */
00161 /*             calling procedure.  LDV.GE.3. */
00162 
00163 /*      U      (workspace) REAL array of size */
00164 /*             (LDU,3*NSHFTS-3) */
00165 
00166 /*      LDU    (input) integer scalar */
00167 /*             LDU is the leading dimension of U just as declared in the */
00168 /*             calling subroutine.  LDU.GE.3*NSHFTS-3. */
00169 
00170 /*      NH     (input) integer scalar */
00171 /*             NH is the number of columns in array WH available for */
00172 /*             workspace. NH.GE.1. */
00173 
00174 /*      WH     (workspace) REAL array of size (LDWH,NH) */
00175 
00176 /*      LDWH   (input) integer scalar */
00177 /*             Leading dimension of WH just as declared in the */
00178 /*             calling procedure.  LDWH.GE.3*NSHFTS-3. */
00179 
00180 /*      NV     (input) integer scalar */
00181 /*             NV is the number of rows in WV available for workspace. */
00182 /*             NV.GE.1. */
00183 
00184 /*      WV     (workspace) REAL array of size */
00185 /*             (LDWV,3*NSHFTS-3) */
00186 
00187 /*      LDWV   (input) integer scalar */
00188 /*             LDWV is the leading dimension of WV as declared in the */
00189 /*             calling subroutine.  LDWV.GE.NV. */
00190 
00191 /*     ================================================================ */
00192 /*     Based on contributions by */
00193 /*        Karen Braman and Ralph Byers, Department of Mathematics, */
00194 /*        University of Kansas, USA */
00195 
00196 /*     ================================================================ */
00197 /*     Reference: */
00198 
00199 /*     K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
00200 /*     Algorithm Part I: Maintaining Well Focused Shifts, and */
00201 /*     Level 3 Performance, SIAM Journal on Matrix Analysis and */
00202 /*     Applications, volume 23, pages 929--947, 2002. */
00203 
00204 /*     ================================================================ */
00205 /*     .. Parameters .. */
00206 /*     .. */
00207 /*     .. Local Scalars .. */
00208 /*     .. */
00209 /*     .. External Functions .. */
00210 /*     .. */
00211 /*     .. Intrinsic Functions .. */
00212 
00213 /*     .. */
00214 /*     .. Local Arrays .. */
00215 /*     .. */
00216 /*     .. External Subroutines .. */
00217 /*     .. */
00218 /*     .. Executable Statements .. */
00219 
00220 /*     ==== If there are no shifts, then there is nothing to do. ==== */
00221 
00222     /* Parameter adjustments */
00223     --sr;
00224     --si;
00225     h_dim1 = *ldh;
00226     h_offset = 1 + h_dim1;
00227     h__ -= h_offset;
00228     z_dim1 = *ldz;
00229     z_offset = 1 + z_dim1;
00230     z__ -= z_offset;
00231     v_dim1 = *ldv;
00232     v_offset = 1 + v_dim1;
00233     v -= v_offset;
00234     u_dim1 = *ldu;
00235     u_offset = 1 + u_dim1;
00236     u -= u_offset;
00237     wv_dim1 = *ldwv;
00238     wv_offset = 1 + wv_dim1;
00239     wv -= wv_offset;
00240     wh_dim1 = *ldwh;
00241     wh_offset = 1 + wh_dim1;
00242     wh -= wh_offset;
00243 
00244     /* Function Body */
00245     if (*nshfts < 2) {
00246         return 0;
00247     }
00248 
00249 /*     ==== If the active block is empty or 1-by-1, then there */
00250 /*     .    is nothing to do. ==== */
00251 
00252     if (*ktop >= *kbot) {
00253         return 0;
00254     }
00255 
00256 /*     ==== Shuffle shifts into pairs of real shifts and pairs */
00257 /*     .    of complex conjugate shifts assuming complex */
00258 /*     .    conjugate shifts are already adjacent to one */
00259 /*     .    another. ==== */
00260 
00261     i__1 = *nshfts - 2;
00262     for (i__ = 1; i__ <= i__1; i__ += 2) {
00263         if (si[i__] != -si[i__ + 1]) {
00264 
00265             swap = sr[i__];
00266             sr[i__] = sr[i__ + 1];
00267             sr[i__ + 1] = sr[i__ + 2];
00268             sr[i__ + 2] = swap;
00269 
00270             swap = si[i__];
00271             si[i__] = si[i__ + 1];
00272             si[i__ + 1] = si[i__ + 2];
00273             si[i__ + 2] = swap;
00274         }
00275 /* L10: */
00276     }
00277 
00278 /*     ==== NSHFTS is supposed to be even, but if it is odd, */
00279 /*     .    then simply reduce it by one.  The shuffle above */
00280 /*     .    ensures that the dropped shift is real and that */
00281 /*     .    the remaining shifts are paired. ==== */
00282 
00283     ns = *nshfts - *nshfts % 2;
00284 
00285 /*     ==== Machine constants for deflation ==== */
00286 
00287     safmin = slamch_("SAFE MINIMUM");
00288     safmax = 1.f / safmin;
00289     slabad_(&safmin, &safmax);
00290     ulp = slamch_("PRECISION");
00291     smlnum = safmin * ((real) (*n) / ulp);
00292 
00293 /*     ==== Use accumulated reflections to update far-from-diagonal */
00294 /*     .    entries ? ==== */
00295 
00296     accum = *kacc22 == 1 || *kacc22 == 2;
00297 
00298 /*     ==== If so, exploit the 2-by-2 block structure? ==== */
00299 
00300     blk22 = ns > 2 && *kacc22 == 2;
00301 
00302 /*     ==== clear trash ==== */
00303 
00304     if (*ktop + 2 <= *kbot) {
00305         h__[*ktop + 2 + *ktop * h_dim1] = 0.f;
00306     }
00307 
00308 /*     ==== NBMPS = number of 2-shift bulges in the chain ==== */
00309 
00310     nbmps = ns / 2;
00311 
00312 /*     ==== KDU = width of slab ==== */
00313 
00314     kdu = nbmps * 6 - 3;
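/*     ==== (Since NS = 2*NBMPS, KDU = 3*NS-3; this slab width is what */
/*     .    the 3*NSHFTS-3 dimensions documented above for U, WH and */
/*     .    WV are sized to hold.) ==== */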
00315 
00316 /*     ==== Create and chase chains of NBMPS bulges ==== */
00317 
00318     i__1 = *kbot - 2;
00319     i__2 = nbmps * 3 - 2;
00320     for (incol = (1 - nbmps) * 3 + *ktop - 1; i__2 < 0 ? incol >= i__1 : 
00321             incol <= i__1; incol += i__2) {
00322         ndcol = incol + kdu;
00323         if (accum) {
00324             slaset_("ALL", &kdu, &kdu, &c_b7, &c_b8, &u[u_offset], ldu);
00325         }
00326 
00327 /*        ==== Near-the-diagonal bulge chase.  The following loop */
00328 /*        .    performs the near-the-diagonal part of a small bulge */
00329 /*        .    multi-shift QR sweep.  Each 6*NBMPS-2 column diagonal */
00330 /*        .    chunk extends from column INCOL to column NDCOL */
00331 /*        .    (including both column INCOL and column NDCOL). The */
00332 /*        .    following loop chases a 3*NBMPS column long chain of */
00333 /*        .    NBMPS bulges 3*NBMPS-2 columns to the right.  (INCOL */
00334 /*        .    may be less than KTOP and NDCOL may be greater than */
00335 /*        .    KBOT indicating phantom columns from which to chase */
00336 /*        .    bulges before they are actually introduced or to which */
00337 /*        .    to chase bulges beyond column KBOT.)  ==== */
00338 
00339 /* Computing MIN */
00340         i__4 = incol + nbmps * 3 - 3, i__5 = *kbot - 2;
00341         i__3 = min(i__4,i__5);
00342         for (krcol = incol; krcol <= i__3; ++krcol) {
00343 
00344 /*           ==== Bulges number MTOP to MBOT are active double implicit */
00345 /*           .    shift bulges.  There may or may not also be a small */
00346 /*           .    2-by-2 bulge, if there is room.  The inactive bulges */
00347 /*           .    (if any) must wait until the active bulges have moved */
00348 /*           .    down the diagonal to make room.  The phantom matrix */
00349 /*           .    paradigm described above helps keep track.  ==== */
00350 
00351 /* Computing MAX */
00352             i__4 = 1, i__5 = (*ktop - 1 - krcol + 2) / 3 + 1;
00353             mtop = max(i__4,i__5);
00354 /* Computing MIN */
00355             i__4 = nbmps, i__5 = (*kbot - krcol) / 3;
00356             mbot = min(i__4,i__5);
00357             m22 = mbot + 1;
00358             bmp22 = mbot < nbmps && krcol + (m22 - 1) * 3 == *kbot - 2;
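/*           ==== (BMP22 is .TRUE. exactly when an extra 2-by-2 bulge */
/*           .    fits at the bottom of the active block, occupying */
/*           .    rows KBOT-1 and KBOT.) ==== */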
00359 
00360 /*           ==== Generate reflections to chase the chain right */
00361 /*           .    one column.  (The minimum value of K is KTOP-1.) ==== */
00362 
00363             i__4 = mbot;
00364             for (m = mtop; m <= i__4; ++m) {
00365                 k = krcol + (m - 1) * 3;
00366                 if (k == *ktop - 1) {
00367                     slaqr1_(&c__3, &h__[*ktop + *ktop * h_dim1], ldh, &sr[(m 
00368                             << 1) - 1], &si[(m << 1) - 1], &sr[m * 2], &si[m *
00369                              2], &v[m * v_dim1 + 1]);
00370                     alpha = v[m * v_dim1 + 1];
00371                     slarfg_(&c__3, &alpha, &v[m * v_dim1 + 2], &c__1, &v[m * 
00372                             v_dim1 + 1]);
00373                 } else {
00374                     beta = h__[k + 1 + k * h_dim1];
00375                     v[m * v_dim1 + 2] = h__[k + 2 + k * h_dim1];
00376                     v[m * v_dim1 + 3] = h__[k + 3 + k * h_dim1];
00377                     slarfg_(&c__3, &beta, &v[m * v_dim1 + 2], &c__1, &v[m * 
00378                             v_dim1 + 1]);
00379 
00380 /*                 ==== A Bulge may collapse because of vigilant */
00381 /*                 .    deflation or destructive underflow.  In the */
00382 /*                 .    underflow case, try the two-small-subdiagonals */
00383 /*                 .    trick to try to reinflate the bulge.  ==== */
00384 
00385                     if (h__[k + 3 + k * h_dim1] != 0.f || h__[k + 3 + (k + 1) 
00386                             * h_dim1] != 0.f || h__[k + 3 + (k + 2) * h_dim1] 
00387                             == 0.f) {
00388 
00389 /*                    ==== Typical case: not collapsed (yet). ==== */
00390 
00391                         h__[k + 1 + k * h_dim1] = beta;
00392                         h__[k + 2 + k * h_dim1] = 0.f;
00393                         h__[k + 3 + k * h_dim1] = 0.f;
00394                     } else {
00395 
00396 /*                    ==== Atypical case: collapsed.  Attempt to */
00397 /*                    .    reintroduce ignoring H(K+1,K) and H(K+2,K). */
00398 /*                    .    If the fill resulting from the new */
00399 /*                    .    reflector is too large, then abandon it. */
00400 /*                    .    Otherwise, use the new one. ==== */
00401 
00402                         slaqr1_(&c__3, &h__[k + 1 + (k + 1) * h_dim1], ldh, &
00403                                 sr[(m << 1) - 1], &si[(m << 1) - 1], &sr[m * 
00404                                 2], &si[m * 2], vt);
00405                         alpha = vt[0];
00406                         slarfg_(&c__3, &alpha, &vt[1], &c__1, vt);
00407                         refsum = vt[0] * (h__[k + 1 + k * h_dim1] + vt[1] * 
00408                                 h__[k + 2 + k * h_dim1]);
00409 
00410                         if ((r__1 = h__[k + 2 + k * h_dim1] - refsum * vt[1], 
00411                                 dabs(r__1)) + (r__2 = refsum * vt[2], dabs(
00412                                 r__2)) > ulp * ((r__3 = h__[k + k * h_dim1], 
00413                                 dabs(r__3)) + (r__4 = h__[k + 1 + (k + 1) * 
00414                                 h_dim1], dabs(r__4)) + (r__5 = h__[k + 2 + (k 
00415                                 + 2) * h_dim1], dabs(r__5)))) {
00416 
00417 /*                       ==== Starting a new bulge here would */
00418 /*                       .    create non-negligible fill.  Use */
00419 /*                       .    the old one with trepidation. ==== */
00420 
00421                             h__[k + 1 + k * h_dim1] = beta;
00422                             h__[k + 2 + k * h_dim1] = 0.f;
00423                             h__[k + 3 + k * h_dim1] = 0.f;
00424                         } else {
00425 
00426 /*                       ==== Starting a new bulge here would */
00427 /*                       .    create only negligible fill. */
00428 /*                       .    Replace the old reflector with */
00429 /*                       .    the new one. ==== */
00430 
00431                             h__[k + 1 + k * h_dim1] -= refsum;
00432                             h__[k + 2 + k * h_dim1] = 0.f;
00433                             h__[k + 3 + k * h_dim1] = 0.f;
00434                             v[m * v_dim1 + 1] = vt[0];
00435                             v[m * v_dim1 + 2] = vt[1];
00436                             v[m * v_dim1 + 3] = vt[2];
00437                         }
00438                     }
00439                 }
00440 /* L20: */
00441             }
00442 
00443 /*           ==== Generate a 2-by-2 reflection, if needed. ==== */
00444 
00445             k = krcol + (m22 - 1) * 3;
00446             if (bmp22) {
00447                 if (k == *ktop - 1) {
00448                     slaqr1_(&c__2, &h__[k + 1 + (k + 1) * h_dim1], ldh, &sr[(
00449                             m22 << 1) - 1], &si[(m22 << 1) - 1], &sr[m22 * 2], 
00450                              &si[m22 * 2], &v[m22 * v_dim1 + 1]);
00451                     beta = v[m22 * v_dim1 + 1];
00452                     slarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22 
00453                             * v_dim1 + 1]);
00454                 } else {
00455                     beta = h__[k + 1 + k * h_dim1];
00456                     v[m22 * v_dim1 + 2] = h__[k + 2 + k * h_dim1];
00457                     slarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22 
00458                             * v_dim1 + 1]);
00459                     h__[k + 1 + k * h_dim1] = beta;
00460                     h__[k + 2 + k * h_dim1] = 0.f;
00461                 }
00462             }
00463 
00464 /*           ==== Multiply H by reflections from the left ==== */
00465 
00466             if (accum) {
00467                 jbot = min(ndcol,*kbot);
00468             } else if (*wantt) {
00469                 jbot = *n;
00470             } else {
00471                 jbot = *kbot;
00472             }
00473             i__4 = jbot;
00474             for (j = max(*ktop,krcol); j <= i__4; ++j) {
00475 /* Computing MIN */
00476                 i__5 = mbot, i__6 = (j - krcol + 2) / 3;
00477                 mend = min(i__5,i__6);
00478                 i__5 = mend;
00479                 for (m = mtop; m <= i__5; ++m) {
00480                     k = krcol + (m - 1) * 3;
00481                     refsum = v[m * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] + v[
00482                             m * v_dim1 + 2] * h__[k + 2 + j * h_dim1] + v[m * 
00483                             v_dim1 + 3] * h__[k + 3 + j * h_dim1]);
00484                     h__[k + 1 + j * h_dim1] -= refsum;
00485                     h__[k + 2 + j * h_dim1] -= refsum * v[m * v_dim1 + 2];
00486                     h__[k + 3 + j * h_dim1] -= refsum * v[m * v_dim1 + 3];
00487 /* L30: */
00488                 }
00489 /* L40: */
00490             }
00491             if (bmp22) {
00492                 k = krcol + (m22 - 1) * 3;
00493 /* Computing MAX */
00494                 i__4 = k + 1;
00495                 i__5 = jbot;
00496                 for (j = max(i__4,*ktop); j <= i__5; ++j) {
00497                     refsum = v[m22 * v_dim1 + 1] * (h__[k + 1 + j * h_dim1] + 
00498                             v[m22 * v_dim1 + 2] * h__[k + 2 + j * h_dim1]);
00499                     h__[k + 1 + j * h_dim1] -= refsum;
00500                     h__[k + 2 + j * h_dim1] -= refsum * v[m22 * v_dim1 + 2];
00501 /* L50: */
00502                 }
00503             }
00504 
00505 /*           ==== Multiply H by reflections from the right. */
00506 /*           .    Delay filling in the last row until the */
00507 /*           .    vigilant deflation check is complete. ==== */
00508 
00509             if (accum) {
00510                 jtop = max(*ktop,incol);
00511             } else if (*wantt) {
00512                 jtop = 1;
00513             } else {
00514                 jtop = *ktop;
00515             }
00516             i__5 = mbot;
00517             for (m = mtop; m <= i__5; ++m) {
00518                 if (v[m * v_dim1 + 1] != 0.f) {
00519                     k = krcol + (m - 1) * 3;
00520 /* Computing MIN */
00521                     i__6 = *kbot, i__7 = k + 3;
00522                     i__4 = min(i__6,i__7);
00523                     for (j = jtop; j <= i__4; ++j) {
00524                         refsum = v[m * v_dim1 + 1] * (h__[j + (k + 1) * 
00525                                 h_dim1] + v[m * v_dim1 + 2] * h__[j + (k + 2) 
00526                                 * h_dim1] + v[m * v_dim1 + 3] * h__[j + (k + 
00527                                 3) * h_dim1]);
00528                         h__[j + (k + 1) * h_dim1] -= refsum;
00529                         h__[j + (k + 2) * h_dim1] -= refsum * v[m * v_dim1 + 
00530                                 2];
00531                         h__[j + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 
00532                                 3];
00533 /* L60: */
00534                     }
00535 
00536                     if (accum) {
00537 
00538 /*                    ==== Accumulate U. (If necessary, update Z later */
00539 /*                    .    with an efficient matrix-matrix */
00540 /*                    .    multiply.) ==== */
00541 
00542                         kms = k - incol;
00543 /* Computing MAX */
00544                         i__4 = 1, i__6 = *ktop - incol;
00545                         i__7 = kdu;
00546                         for (j = max(i__4,i__6); j <= i__7; ++j) {
00547                             refsum = v[m * v_dim1 + 1] * (u[j + (kms + 1) * 
00548                                     u_dim1] + v[m * v_dim1 + 2] * u[j + (kms 
00549                                     + 2) * u_dim1] + v[m * v_dim1 + 3] * u[j 
00550                                     + (kms + 3) * u_dim1]);
00551                             u[j + (kms + 1) * u_dim1] -= refsum;
00552                             u[j + (kms + 2) * u_dim1] -= refsum * v[m * 
00553                                     v_dim1 + 2];
00554                             u[j + (kms + 3) * u_dim1] -= refsum * v[m * 
00555                                     v_dim1 + 3];
00556 /* L70: */
00557                         }
00558                     } else if (*wantz) {
00559 
00560 /*                    ==== U is not accumulated, so update Z */
00561 /*                    .    now by multiplying by reflections */
00562 /*                    .    from the right. ==== */
00563 
00564                         i__7 = *ihiz;
00565                         for (j = *iloz; j <= i__7; ++j) {
00566                             refsum = v[m * v_dim1 + 1] * (z__[j + (k + 1) * 
00567                                     z_dim1] + v[m * v_dim1 + 2] * z__[j + (k 
00568                                     + 2) * z_dim1] + v[m * v_dim1 + 3] * z__[
00569                                     j + (k + 3) * z_dim1]);
00570                             z__[j + (k + 1) * z_dim1] -= refsum;
00571                             z__[j + (k + 2) * z_dim1] -= refsum * v[m * 
00572                                     v_dim1 + 2];
00573                             z__[j + (k + 3) * z_dim1] -= refsum * v[m * 
00574                                     v_dim1 + 3];
00575 /* L80: */
00576                         }
00577                     }
00578                 }
00579 /* L90: */
00580             }
00581 
00582 /*           ==== Special case: 2-by-2 reflection (if needed) ==== */
00583 
00584             k = krcol + (m22 - 1) * 3;
00585             if (bmp22 && v[m22 * v_dim1 + 1] != 0.f) {
00586 /* Computing MIN */
00587                 i__7 = *kbot, i__4 = k + 3;
00588                 i__5 = min(i__7,i__4);
00589                 for (j = jtop; j <= i__5; ++j) {
00590                     refsum = v[m22 * v_dim1 + 1] * (h__[j + (k + 1) * h_dim1] 
00591                             + v[m22 * v_dim1 + 2] * h__[j + (k + 2) * h_dim1])
00592                             ;
00593                     h__[j + (k + 1) * h_dim1] -= refsum;
00594                     h__[j + (k + 2) * h_dim1] -= refsum * v[m22 * v_dim1 + 2];
00595 /* L100: */
00596                 }
00597 
00598                 if (accum) {
00599                     kms = k - incol;
00600 /* Computing MAX */
00601                     i__5 = 1, i__7 = *ktop - incol;
00602                     i__4 = kdu;
00603                     for (j = max(i__5,i__7); j <= i__4; ++j) {
00604                         refsum = v[m22 * v_dim1 + 1] * (u[j + (kms + 1) * 
00605                                 u_dim1] + v[m22 * v_dim1 + 2] * u[j + (kms + 
00606                                 2) * u_dim1]);
00607                         u[j + (kms + 1) * u_dim1] -= refsum;
00608                         u[j + (kms + 2) * u_dim1] -= refsum * v[m22 * v_dim1 
00609                                 + 2];
00610 /* L110: */
00611                     }
00612                 } else if (*wantz) {
00613                     i__4 = *ihiz;
00614                     for (j = *iloz; j <= i__4; ++j) {
00615                         refsum = v[m22 * v_dim1 + 1] * (z__[j + (k + 1) * 
00616                                 z_dim1] + v[m22 * v_dim1 + 2] * z__[j + (k + 
00617                                 2) * z_dim1]);
00618                         z__[j + (k + 1) * z_dim1] -= refsum;
00619                         z__[j + (k + 2) * z_dim1] -= refsum * v[m22 * v_dim1 
00620                                 + 2];
00621 /* L120: */
00622                     }
00623                 }
00624             }
00625 
00626 /*           ==== Vigilant deflation check ==== */
00627 
00628             mstart = mtop;
00629             if (krcol + (mstart - 1) * 3 < *ktop) {
00630                 ++mstart;
00631             }
00632             mend = mbot;
00633             if (bmp22) {
00634                 ++mend;
00635             }
00636             if (krcol == *kbot - 2) {
00637                 ++mend;
00638             }
00639             i__4 = mend;
00640             for (m = mstart; m <= i__4; ++m) {
00641 /* Computing MIN */
00642                 i__5 = *kbot - 1, i__7 = krcol + (m - 1) * 3;
00643                 k = min(i__5,i__7);
00644 
00645 /*              ==== The following convergence test requires that */
00646 /*              .    the traditional small-compared-to-nearby-diagonals */
00647 /*              .    criterion and the Ahues & Tisseur (LAWN 122, 1997) */
00648 /*              .    criteria both be satisfied.  The latter improves */
00649 /*              .    accuracy in some examples. Falling back on an */
00650 /*              .    alternate convergence criterion when TST1 or TST2 */
00651 /*              .    is zero (as done here) is traditional but probably */
00652 /*              .    unnecessary. ==== */
00653 
00654                 if (h__[k + 1 + k * h_dim1] != 0.f) {
00655                     tst1 = (r__1 = h__[k + k * h_dim1], dabs(r__1)) + (r__2 = 
00656                             h__[k + 1 + (k + 1) * h_dim1], dabs(r__2));
00657                     if (tst1 == 0.f) {
00658                         if (k >= *ktop + 1) {
00659                             tst1 += (r__1 = h__[k + (k - 1) * h_dim1], dabs(
00660                                     r__1));
00661                         }
00662                         if (k >= *ktop + 2) {
00663                             tst1 += (r__1 = h__[k + (k - 2) * h_dim1], dabs(
00664                                     r__1));
00665                         }
00666                         if (k >= *ktop + 3) {
00667                             tst1 += (r__1 = h__[k + (k - 3) * h_dim1], dabs(
00668                                     r__1));
00669                         }
00670                         if (k <= *kbot - 2) {
00671                             tst1 += (r__1 = h__[k + 2 + (k + 1) * h_dim1], 
00672                                     dabs(r__1));
00673                         }
00674                         if (k <= *kbot - 3) {
00675                             tst1 += (r__1 = h__[k + 3 + (k + 1) * h_dim1], 
00676                                     dabs(r__1));
00677                         }
00678                         if (k <= *kbot - 4) {
00679                             tst1 += (r__1 = h__[k + 4 + (k + 1) * h_dim1], 
00680                                     dabs(r__1));
00681                         }
00682                     }
00683 /* Computing MAX */
00684                     r__2 = smlnum, r__3 = ulp * tst1;
00685                     if ((r__1 = h__[k + 1 + k * h_dim1], dabs(r__1)) <= dmax(
00686                             r__2,r__3)) {
00687 /* Computing MAX */
00688                         r__3 = (r__1 = h__[k + 1 + k * h_dim1], dabs(r__1)), 
00689                                 r__4 = (r__2 = h__[k + (k + 1) * h_dim1], 
00690                                 dabs(r__2));
00691                         h12 = dmax(r__3,r__4);
00692 /* Computing MIN */
00693                         r__3 = (r__1 = h__[k + 1 + k * h_dim1], dabs(r__1)), 
00694                                 r__4 = (r__2 = h__[k + (k + 1) * h_dim1], 
00695                                 dabs(r__2));
00696                         h21 = dmin(r__3,r__4);
00697 /* Computing MAX */
00698                         r__3 = (r__1 = h__[k + 1 + (k + 1) * h_dim1], dabs(
00699                                 r__1)), r__4 = (r__2 = h__[k + k * h_dim1] - 
00700                                 h__[k + 1 + (k + 1) * h_dim1], dabs(r__2));
00701                         h11 = dmax(r__3,r__4);
00702 /* Computing MIN */
00703                         r__3 = (r__1 = h__[k + 1 + (k + 1) * h_dim1], dabs(
00704                                 r__1)), r__4 = (r__2 = h__[k + k * h_dim1] - 
00705                                 h__[k + 1 + (k + 1) * h_dim1], dabs(r__2));
00706                         h22 = dmin(r__3,r__4);
00707                         scl = h11 + h12;
00708                         tst2 = h22 * (h11 / scl);
00709 
00710 /* Computing MAX */
00711                         r__1 = smlnum, r__2 = ulp * tst2;
00712                         if (tst2 == 0.f || h21 * (h12 / scl) <= dmax(r__1,
00713                                 r__2)) {
00714                             h__[k + 1 + k * h_dim1] = 0.f;
00715                         }
00716                     }
00717                 }
00718 /* L130: */
00719             }
00720 
00721 /*           ==== Fill in the last row of each bulge. ==== */
00722 
00723 /* Computing MIN */
00724             i__4 = nbmps, i__5 = (*kbot - krcol - 1) / 3;
00725             mend = min(i__4,i__5);
00726             i__4 = mend;
00727             for (m = mtop; m <= i__4; ++m) {
00728                 k = krcol + (m - 1) * 3;
00729                 refsum = v[m * v_dim1 + 1] * v[m * v_dim1 + 3] * h__[k + 4 + (
00730                         k + 3) * h_dim1];
00731                 h__[k + 4 + (k + 1) * h_dim1] = -refsum;
00732                 h__[k + 4 + (k + 2) * h_dim1] = -refsum * v[m * v_dim1 + 2];
00733                 h__[k + 4 + (k + 3) * h_dim1] -= refsum * v[m * v_dim1 + 3];
00734 /* L140: */
00735             }
00736 
00737 /*           ==== End of near-the-diagonal bulge chase. ==== */
00738 
00739 /* L150: */
00740         }
00741 
00742 /*        ==== Use U (if accumulated) to update far-from-diagonal */
00743 /*        .    entries in H.  If required, use U to update Z as */
00744 /*        .    well. ==== */
00745 
00746         if (accum) {
00747             if (*wantt) {
00748                 jtop = 1;
00749                 jbot = *n;
00750             } else {
00751                 jtop = *ktop;
00752                 jbot = *kbot;
00753             }
00754             if (! blk22 || incol < *ktop || ndcol > *kbot || ns <= 2) {
00755 
00756 /*              ==== Updates not exploiting the 2-by-2 block */
00757 /*              .    structure of U.  K1 and NU keep track of */
00758 /*              .    the location and size of U in the special */
00759 /*              .    cases of introducing bulges and chasing */
00760 /*              .    bulges off the bottom.  In these special */
00761 /*              .    cases and in case the number of shifts */
00762 /*              .    is NS = 2, there is no 2-by-2 block */
00763 /*              .    structure to exploit.  ==== */
00764 
00765 /* Computing MAX */
00766                 i__3 = 1, i__4 = *ktop - incol;
00767                 k1 = max(i__3,i__4);
00768 /* Computing MAX */
00769                 i__3 = 0, i__4 = ndcol - *kbot;
00770                 nu = kdu - max(i__3,i__4) - k1 + 1;
00771 
00772 /*              ==== Horizontal Multiply ==== */
00773 
00774                 i__3 = jbot;
00775                 i__4 = *nh;
00776                 for (jcol = min(ndcol,*kbot) + 1; i__4 < 0 ? jcol >= i__3 : 
00777                         jcol <= i__3; jcol += i__4) {
00778 /* Computing MIN */
00779                     i__5 = *nh, i__7 = jbot - jcol + 1;
00780                     jlen = min(i__5,i__7);
00781                     sgemm_("C", "N", &nu, &jlen, &nu, &c_b8, &u[k1 + k1 * 
00782                             u_dim1], ldu, &h__[incol + k1 + jcol * h_dim1], 
00783                             ldh, &c_b7, &wh[wh_offset], ldwh);
00784                     slacpy_("ALL", &nu, &jlen, &wh[wh_offset], ldwh, &h__[
00785                             incol + k1 + jcol * h_dim1], ldh);
00786 /* L160: */
00787                 }
00788 
00789 /*              ==== Vertical multiply ==== */
00790 
00791                 i__4 = max(*ktop,incol) - 1;
00792                 i__3 = *nv;
00793                 for (jrow = jtop; i__3 < 0 ? jrow >= i__4 : jrow <= i__4; 
00794                         jrow += i__3) {
00795 /* Computing MIN */
00796                     i__5 = *nv, i__7 = max(*ktop,incol) - jrow;
00797                     jlen = min(i__5,i__7);
00798                     sgemm_("N", "N", &jlen, &nu, &nu, &c_b8, &h__[jrow + (
00799                             incol + k1) * h_dim1], ldh, &u[k1 + k1 * u_dim1], 
00800                             ldu, &c_b7, &wv[wv_offset], ldwv);
00801                     slacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &h__[
00802                             jrow + (incol + k1) * h_dim1], ldh);
00803 /* L170: */
00804                 }
00805 
00806 /*              ==== Z multiply (also vertical) ==== */
00807 
00808                 if (*wantz) {
00809                     i__3 = *ihiz;
00810                     i__4 = *nv;
00811                     for (jrow = *iloz; i__4 < 0 ? jrow >= i__3 : jrow <= i__3;
00812                              jrow += i__4) {
00813 /* Computing MIN */
00814                         i__5 = *nv, i__7 = *ihiz - jrow + 1;
00815                         jlen = min(i__5,i__7);
00816                         sgemm_("N", "N", &jlen, &nu, &nu, &c_b8, &z__[jrow + (
00817                                 incol + k1) * z_dim1], ldz, &u[k1 + k1 * 
00818                                 u_dim1], ldu, &c_b7, &wv[wv_offset], ldwv);
00819                         slacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &z__[
00820                                 jrow + (incol + k1) * z_dim1], ldz)
00821                                 ;
00822 /* L180: */
00823                     }
00824                 }
00825             } else {
00826 
00827 /*              ==== Updates exploiting U's 2-by-2 block structure. */
00828 /*              .    (I2, I4, J2, J4 are the last rows and columns */
00829 /*              .    of the blocks.) ==== */
00830 
00831                 i2 = (kdu + 1) / 2;
00832                 i4 = kdu;
00833                 j2 = i4 - i2;
00834                 j4 = kdu;
00835 
00836 /*              ==== KZS and KNZ deal with the band of zeros */
00837 /*              .    along the diagonal of one of the triangular */
00838 /*              .    blocks. ==== */
00839 
00840                 kzs = j4 - j2 - (ns + 1);
00841                 knz = ns + 1;
00842 
00843 /*              ==== Horizontal multiply ==== */
00844 
00845                 i__4 = jbot;
00846                 i__3 = *nh;
00847                 for (jcol = min(ndcol,*kbot) + 1; i__3 < 0 ? jcol >= i__4 : 
00848                         jcol <= i__4; jcol += i__3) {
00849 /* Computing MIN */
00850                     i__5 = *nh, i__7 = jbot - jcol + 1;
00851                     jlen = min(i__5,i__7);
00852 
00853 /*                 ==== Copy bottom of H to top+KZS of scratch ==== */
00854 /*                 .    (The first KZS rows get multiplied by zero.) ==== */
00855 
00856                     slacpy_("ALL", &knz, &jlen, &h__[incol + 1 + j2 + jcol * 
00857                             h_dim1], ldh, &wh[kzs + 1 + wh_dim1], ldwh);
00858 
00859 /*                 ==== Multiply by U21' ==== */
00860 
00861                     slaset_("ALL", &kzs, &jlen, &c_b7, &c_b7, &wh[wh_offset], 
00862                             ldwh);
00863                     strmm_("L", "U", "C", "N", &knz, &jlen, &c_b8, &u[j2 + 1 
00864                             + (kzs + 1) * u_dim1], ldu, &wh[kzs + 1 + wh_dim1]
00865 , ldwh);
00866 
00867 /*                 ==== Multiply top of H by U11' ==== */
00868 
00869                     sgemm_("C", "N", &i2, &jlen, &j2, &c_b8, &u[u_offset], 
00870                             ldu, &h__[incol + 1 + jcol * h_dim1], ldh, &c_b8, 
00871                             &wh[wh_offset], ldwh);
00872 
00873 /*                 ==== Copy top of H to bottom of WH ==== */
00874 
00875                     slacpy_("ALL", &j2, &jlen, &h__[incol + 1 + jcol * h_dim1]
00876 , ldh, &wh[i2 + 1 + wh_dim1], ldwh);
00877 
00878 /*                 ==== Multiply by U21' ==== */
00879 
00880                     strmm_("L", "L", "C", "N", &j2, &jlen, &c_b8, &u[(i2 + 1) 
00881                             * u_dim1 + 1], ldu, &wh[i2 + 1 + wh_dim1], ldwh);
00882 
00883 /*                 ==== Multiply by U22 ==== */
00884 
00885                     i__5 = i4 - i2;
00886                     i__7 = j4 - j2;
00887                     sgemm_("C", "N", &i__5, &jlen, &i__7, &c_b8, &u[j2 + 1 + (
00888                             i2 + 1) * u_dim1], ldu, &h__[incol + 1 + j2 + 
00889                             jcol * h_dim1], ldh, &c_b8, &wh[i2 + 1 + wh_dim1], 
00890                              ldwh);
00891 
00892 /*                 ==== Copy it back ==== */
00893 
00894                     slacpy_("ALL", &kdu, &jlen, &wh[wh_offset], ldwh, &h__[
00895                             incol + 1 + jcol * h_dim1], ldh);
00896 /* L190: */
00897                 }
00898 
00899 /*              ==== Vertical multiply ==== */
00900 
00901                 i__3 = max(incol,*ktop) - 1;
00902                 i__4 = *nv;
00903                 for (jrow = jtop; i__4 < 0 ? jrow >= i__3 : jrow <= i__3; 
00904                         jrow += i__4) {
00905 /* Computing MIN */
00906                     i__5 = *nv, i__7 = max(incol,*ktop) - jrow;
00907                     jlen = min(i__5,i__7);
00908 
00909 /*                 ==== Copy right of H to scratch (the first KZS */
00910 /*                 .    columns get multiplied by zero) ==== */
00911 
00912                     slacpy_("ALL", &jlen, &knz, &h__[jrow + (incol + 1 + j2) *
00913                              h_dim1], ldh, &wv[(kzs + 1) * wv_dim1 + 1], ldwv);
00914 
00915 /*                 ==== Multiply by U21 ==== */
00916 
00917                     slaset_("ALL", &jlen, &kzs, &c_b7, &c_b7, &wv[wv_offset], 
00918                             ldwv);
00919                     strmm_("R", "U", "N", "N", &jlen, &knz, &c_b8, &u[j2 + 1 
00920                             + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) * 
00921                             wv_dim1 + 1], ldwv);
00922 
00923 /*                 ==== Multiply by U11 ==== */
00924 
00925                     sgemm_("N", "N", &jlen, &i2, &j2, &c_b8, &h__[jrow + (
00926                             incol + 1) * h_dim1], ldh, &u[u_offset], ldu, &
00927                             c_b8, &wv[wv_offset], ldwv);
00928 
00929 /*                 ==== Copy left of H to right of scratch ==== */
00930 
00931                     slacpy_("ALL", &jlen, &j2, &h__[jrow + (incol + 1) * 
00932                             h_dim1], ldh, &wv[(i2 + 1) * wv_dim1 + 1], ldwv);
00933 
00934 /*                 ==== Multiply by U21 ==== */
00935 
00936                     i__5 = i4 - i2;
00937                     strmm_("R", "L", "N", "N", &jlen, &i__5, &c_b8, &u[(i2 + 
00938                             1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * wv_dim1 + 1]
00939 , ldwv);
00940 
00941 /*                 ==== Multiply by U22 ==== */
00942 
00943                     i__5 = i4 - i2;
00944                     i__7 = j4 - j2;
00945                     sgemm_("N", "N", &jlen, &i__5, &i__7, &c_b8, &h__[jrow + (
00946                             incol + 1 + j2) * h_dim1], ldh, &u[j2 + 1 + (i2 + 
00947                             1) * u_dim1], ldu, &c_b8, &wv[(i2 + 1) * wv_dim1 
00948                             + 1], ldwv);
00949 
00950 /*                 ==== Copy it back ==== */
00951 
00952                     slacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &h__[
00953                             jrow + (incol + 1) * h_dim1], ldh);
00954 /* L200: */
00955                 }
00956 
00957 /*              ==== Multiply Z (also vertical) ==== */
00958 
00959                 if (*wantz) {
00960                     i__4 = *ihiz;
00961                     i__3 = *nv;
00962                     for (jrow = *iloz; i__3 < 0 ? jrow >= i__4 : jrow <= i__4;
00963                              jrow += i__3) {
00964 /* Computing MIN */
00965                         i__5 = *nv, i__7 = *ihiz - jrow + 1;
00966                         jlen = min(i__5,i__7);
00967 
00968 /*                    ==== Copy right of Z to left of scratch (first */
00969 /*                    .     KZS columns get multiplied by zero) ==== */
00970 
00971                         slacpy_("ALL", &jlen, &knz, &z__[jrow + (incol + 1 + 
00972                                 j2) * z_dim1], ldz, &wv[(kzs + 1) * wv_dim1 + 
00973                                 1], ldwv);
00974 
00975 /*                    ==== Multiply by U12 ==== */
00976 
00977                         slaset_("ALL", &jlen, &kzs, &c_b7, &c_b7, &wv[
00978                                 wv_offset], ldwv);
00979                         strmm_("R", "U", "N", "N", &jlen, &knz, &c_b8, &u[j2 
00980                                 + 1 + (kzs + 1) * u_dim1], ldu, &wv[(kzs + 1) 
00981                                 * wv_dim1 + 1], ldwv);
00982 
00983 /*                    ==== Multiply by U11 ==== */
00984 
00985                         sgemm_("N", "N", &jlen, &i2, &j2, &c_b8, &z__[jrow + (
00986                                 incol + 1) * z_dim1], ldz, &u[u_offset], ldu, 
00987                                 &c_b8, &wv[wv_offset], ldwv);
00988 
00989 /*                    ==== Copy left of Z to right of scratch ==== */
00990 
00991                         slacpy_("ALL", &jlen, &j2, &z__[jrow + (incol + 1) * 
00992                                 z_dim1], ldz, &wv[(i2 + 1) * wv_dim1 + 1], 
00993                                 ldwv);
00994 
00995 /*                    ==== Multiply by U21 ==== */
00996 
00997                         i__5 = i4 - i2;
00998                         strmm_("R", "L", "N", "N", &jlen, &i__5, &c_b8, &u[(
00999                                 i2 + 1) * u_dim1 + 1], ldu, &wv[(i2 + 1) * 
01000                                 wv_dim1 + 1], ldwv);
01001 
01002 /*                    ==== Multiply by U22 ==== */
01003 
01004                         i__5 = i4 - i2;
01005                         i__7 = j4 - j2;
01006                         sgemm_("N", "N", &jlen, &i__5, &i__7, &c_b8, &z__[
01007                                 jrow + (incol + 1 + j2) * z_dim1], ldz, &u[j2 
01008                                 + 1 + (i2 + 1) * u_dim1], ldu, &c_b8, &wv[(i2 
01009                                 + 1) * wv_dim1 + 1], ldwv);
01010 
01011 /*                    ==== Copy the result back to Z ==== */
01012 
01013                         slacpy_("ALL", &jlen, &kdu, &wv[wv_offset], ldwv, &
01014                                 z__[jrow + (incol + 1) * z_dim1], ldz);
01015 /* L210: */
01016                     }
01017                 }
01018             }
01019         }
01020 /* L220: */
01021     }
01022 
01023 /*     ==== End of SLAQR5 ==== */
01024 
01025     return 0;
01026 } /* slaqr5_ */
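
The argument comments above fully specify the calling contract. The driver sketch below is illustrative only and is not part of slaqr5.c or the CLAPACK sources: it shows one way a hypothetical caller could size the V, U, WH and WV workspaces to the documented minimums (LDV >= 3, LDU and LDWH >= 3*NSHFTS-3, LDWV >= NV) before invoking slaqr5_ on a full active block. The shift count, the KACC22 mode and the helper name sweep_example are arbitrary example choices; SR and SI are assumed to already hold NSHFTS shifts with complex-conjugate pairs adjacent, as SLAQR0 would supply them, and a real caller would normally take the slaqr5_ prototype from the project's LAPACK header rather than repeating it.

    /* Hypothetical usage sketch -- not part of slaqr5.c.  Assumes f2c.h is
       available and that h (LDH-by-N, upper Hessenberg) and z (LDZ-by-N,
       orthogonal) are already set up by the caller. */
    #include <stdlib.h>
    #include "f2c.h"

    extern int slaqr5_(logical *, logical *, integer *, integer *, integer *,
                       integer *, integer *, real *, real *, real *, integer *,
                       integer *, integer *, real *, integer *, real *,
                       integer *, real *, integer *, integer *, real *,
                       integer *, integer *, real *, integer *);

    /* Apply one multi-shift QR sweep to the whole N-by-N block. */
    static int sweep_example(integer n, real *h, integer ldh,
                             real *z, integer ldz, real *sr, real *si,
                             integer nshfts)
    {
        logical wantt = TRUE_, wantz = TRUE_;
        integer kacc22 = 2;                     /* accumulate + 2-by-2 blocking */
        integer ktop = 1, kbot = n;             /* isolated diagonal block      */
        integer iloz = 1, ihiz = n;             /* rows of Z to update          */

        integer ldv = 3;                        /* LDV  >= 3                    */
        integer ldu = 3 * nshfts - 3;           /* LDU  >= 3*NSHFTS-3           */
        integer nh = n, ldwh = 3 * nshfts - 3;  /* NH >= 1, LDWH >= 3*NSHFTS-3  */
        integer nv = n, ldwv = n;               /* NV >= 1, LDWV >= NV          */

        real *v  = malloc(sizeof(real) * ldv  * (nshfts / 2));
        real *u  = malloc(sizeof(real) * ldu  * (3 * nshfts - 3));
        real *wh = malloc(sizeof(real) * ldwh * nh);
        real *wv = malloc(sizeof(real) * ldwv * (3 * nshfts - 3));
        if (v == NULL || u == NULL || wh == NULL || wv == NULL)
            return -1;

        slaqr5_(&wantt, &wantz, &kacc22, &n, &ktop, &kbot, &nshfts, sr, si,
                h, &ldh, &iloz, &ihiz, z, &ldz, v, &ldv, u, &ldu,
                &nv, wv, &ldwv, &nh, wh, &ldwh);

        free(v); free(u); free(wh); free(wv);
        return 0;
    }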

