      SUBROUTINE DLASDA( ICOMPQ, SMLSIZ, N, SQRE, D, E, U, LDU, VT, K,
     $                   DIFL, DIFR, Z, POLES, GIVPTR, GIVCOL, LDGCOL,
     $                   PERM, GIVNUM, C, S, WORK, IWORK, INFO )
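*
*     Using a divide and conquer approach, DLASDA computes the SVD of
*     a real upper bidiagonal N-by-M matrix B with diagonal D and
*     offdiagonal E, where M = N + SQRE.  If ICOMPQ = 0, only the
*     singular values are computed; if ICOMPQ = 1, the singular
*     vectors are also computed, in compact form.
*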
      INTEGER            ICOMPQ, INFO, LDGCOL, LDU, N, SMLSIZ, SQRE
      INTEGER            GIVCOL( LDGCOL, * ), GIVPTR( * ), IWORK( * ),
     $                   K( * ), PERM( LDGCOL, * )
      DOUBLE PRECISION   C( * ), D( * ), DIFL( LDU, * ), DIFR( LDU, * ),
     $                   E( * ), GIVNUM( LDU, * ), POLES( LDU, * ),
     $                   S( * ), U( LDU, * ), VT( LDU, * ), WORK( * ),
     $                   Z( LDU, * )
      DOUBLE PRECISION   ZERO, ONE
      PARAMETER          ( ZERO = 0.0D+0, ONE = 1.0D+0 )
      INTEGER            I, I1, IC, IDXQ, IDXQI, IM1, INODE, ITEMP, IWK,
     $                   J, LF, LL, LVL, LVL2, M, NCC, ND, NDB1, NDIML,
     $                   NDIMR, NL, NLF, NLP1, NLVL, NR, NRF, NRP1, NRU,
     $                   NWORK1, NWORK2, SMLSZP, SQREI, VF, VFI, VL, VLI
      DOUBLE PRECISION   ALPHA, BETA
*
*     Test the input parameters.
*
      INFO = 0
*
      IF( ( ICOMPQ.LT.0 ) .OR. ( ICOMPQ.GT.1 ) ) THEN
         INFO = -1
      ELSE IF( SMLSIZ.LT.3 ) THEN
         INFO = -2
      ELSE IF( N.LT.0 ) THEN
         INFO = -3
      ELSE IF( ( SQRE.LT.0 ) .OR. ( SQRE.GT.1 ) ) THEN
         INFO = -4
      ELSE IF( LDU.LT.( N+SQRE ) ) THEN
         INFO = -8
      ELSE IF( LDGCOL.LT.N ) THEN
         INFO = -17
      END IF
      IF( INFO.NE.0 ) THEN
         CALL XERBLA( 'DLASDA', -INFO )
         RETURN
      END IF
*
      M = N + SQRE
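*
*     If the input matrix is too small, call DLASDQ to find the SVD.
*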
      IF( N.LE.SMLSIZ ) THEN
         IF( ICOMPQ.EQ.0 ) THEN
            CALL DLASDQ( 'U', SQRE, N, 0, 0, 0, D, E, VT, LDU, U, LDU,
     $                   U, LDU, WORK, INFO )
         ELSE
            CALL DLASDQ( 'U', SQRE, N, M, N, 0, D, E, VT, LDU, U, LDU,
     $                   U, LDU, WORK, INFO )
         END IF
         RETURN
      END IF
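*
*     Book-keeping and set up the computation tree.
*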
      INODE = 1
      NDIML = INODE + N
      NDIMR = NDIML + N
      IDXQ = NDIMR + N
      IWK = IDXQ + N
*
      NCC = 0
      NRU = 0
*
      SMLSZP = SMLSIZ + 1
      VF = 1
      VL = VF + M
      NWORK1 = VL + M
      NWORK2 = NWORK1 + SMLSZP*SMLSZP
*
      CALL DLASDT( N, NLVL, ND, IWORK( INODE ), IWORK( NDIML ),
     $             IWORK( NDIMR ), SMLSIZ )
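*
*     For the nodes on bottom level of the tree, solve
*     their subproblems by DLASDQ.
*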
      NDB1 = ( ND+1 ) / 2
      DO 30 I = NDB1, ND
*
*        IC : center row of each node
*        NL : number of rows of left  subproblem
*        NR : number of rows of right subproblem
*        NLF: starting row of the left   subproblem
*        NRF: starting row of the right  subproblem
*
         I1 = I - 1
         IC = IWORK( INODE+I1 )
         NL = IWORK( NDIML+I1 )
         NLP1 = NL + 1
         NR = IWORK( NDIMR+I1 )
         NLF = IC - NL
         NRF = IC + 1
         IDXQI = IDXQ + NLF - 2
         VFI = VF + NLF - 1
         VLI = VL + NLF - 1
         SQREI = 1
         IF( ICOMPQ.EQ.0 ) THEN
            CALL DLASET( 'A', NLP1, NLP1, ZERO, ONE, WORK( NWORK1 ),
     $                   SMLSZP )
            CALL DLASDQ( 'U', SQREI, NL, NLP1, NRU, NCC, D( NLF ),
     $                   E( NLF ), WORK( NWORK1 ), SMLSZP,
     $                   WORK( NWORK2 ), NL, WORK( NWORK2 ), NL,
     $                   WORK( NWORK2 ), INFO )
            ITEMP = NWORK1 + NL*SMLSZP
            CALL DCOPY( NLP1, WORK( NWORK1 ), 1, WORK( VFI ), 1 )
            CALL DCOPY( NLP1, WORK( ITEMP ), 1, WORK( VLI ), 1 )
         ELSE
            CALL DLASET( 'A', NL, NL, ZERO, ONE, U( NLF, 1 ), LDU )
            CALL DLASET( 'A', NLP1, NLP1, ZERO, ONE, VT( NLF, 1 ), LDU )
            CALL DLASDQ( 'U', SQREI, NL, NLP1, NL, NCC, D( NLF ),
     $                   E( NLF ), VT( NLF, 1 ), LDU, U( NLF, 1 ), LDU,
     $                   U( NLF, 1 ), LDU, WORK( NWORK1 ), INFO )
            CALL DCOPY( NLP1, VT( NLF, 1 ), 1, WORK( VFI ), 1 )
            CALL DCOPY( NLP1, VT( NLF, NLP1 ), 1, WORK( VLI ), 1 )
         END IF
         IF( ( I.EQ.ND ) .AND. ( SQRE.EQ.0 ) ) THEN
            SQREI = 0
         ELSE
            SQREI = 1
         END IF
         IDXQI = IDXQI + NLP1
         VFI = VFI + NLP1
         VLI = VLI + NLP1
         NRP1 = NR + SQREI
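*
*        Solve the right subproblem of this node in the same way.
*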
         IF( ICOMPQ.EQ.0 ) THEN
            CALL DLASET( 'A', NRP1, NRP1, ZERO, ONE, WORK( NWORK1 ),
     $                   SMLSZP )
            CALL DLASDQ( 'U', SQREI, NR, NRP1, NRU, NCC, D( NRF ),
     $                   E( NRF ), WORK( NWORK1 ), SMLSZP,
     $                   WORK( NWORK2 ), NR, WORK( NWORK2 ), NR,
     $                   WORK( NWORK2 ), INFO )
            ITEMP = NWORK1 + ( NRP1-1 )*SMLSZP
            CALL DCOPY( NRP1, WORK( NWORK1 ), 1, WORK( VFI ), 1 )
            CALL DCOPY( NRP1, WORK( ITEMP ), 1, WORK( VLI ), 1 )
         ELSE
            CALL DLASET( 'A', NR, NR, ZERO, ONE, U( NRF, 1 ), LDU )
            CALL DLASET( 'A', NRP1, NRP1, ZERO, ONE, VT( NRF, 1 ), LDU )
            CALL DLASDQ( 'U', SQREI, NR, NRP1, NR, NCC, D( NRF ),
     $                   E( NRF ), VT( NRF, 1 ), LDU, U( NRF, 1 ), LDU,
     $                   U( NRF, 1 ), LDU, WORK( NWORK1 ), INFO )
            CALL DCOPY( NRP1, VT( NRF, 1 ), 1, WORK( VFI ), 1 )
            CALL DCOPY( NRP1, VT( NRF, NRP1 ), 1, WORK( VLI ), 1 )
         END IF
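*
*     Now conquer each subproblem bottom-up.
*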
      J = 2**NLVL
      DO 50 LVL = NLVL, 1, -1
         LVL2 = LVL*2 - 1
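*
*        Find the first node LF and last node LL on the
*        current level LVL.
*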
         IF( LVL.EQ.1 ) THEN
            LF = 1
            LL = 1
         ELSE
            LF = 2**( LVL-1 )
            LL = 2*LF - 1
         END IF
         DO 40 I = LF, LL
            IM1 = I - 1
            IC = IWORK( INODE+IM1 )
            NL = IWORK( NDIML+IM1 )
            NR = IWORK( NDIMR+IM1 )
            NLF = IC - NL
            NRF = IC + 1
            IF( I.EQ.LL ) THEN
               SQREI = SQRE
            ELSE
               SQREI = 1
            END IF
            VFI = VF + NLF - 1
            VLI = VL + NLF - 1
            IDXQI = IDXQ + NLF - 1
            ALPHA = D( IC )
            BETA = E( IC )
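*
*           Merge the left and right subproblems at this node by one
*           call to DLASD6.
*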
            IF( ICOMPQ.EQ.0 ) THEN
               CALL DLASD6( ICOMPQ, NL, NR, SQREI, D( NLF ),
     $                      WORK( VFI ), WORK( VLI ), ALPHA, BETA,
     $                      IWORK( IDXQI ), PERM, GIVPTR( 1 ), GIVCOL,
     $                      LDGCOL, GIVNUM, LDU, POLES, DIFL, DIFR, Z,
     $                      K( 1 ), C( 1 ), S( 1 ), WORK( NWORK1 ),
     $                      IWORK( IWK ), INFO )
            ELSE
               J = J - 1
               CALL DLASD6( ICOMPQ, NL, NR, SQREI, D( NLF ),
     $                      WORK( VFI ), WORK( VLI ), ALPHA, BETA,
     $                      IWORK( IDXQI ), PERM( NLF, LVL ),
     $                      GIVPTR( J ), GIVCOL( NLF, LVL2 ), LDGCOL,
     $                      GIVNUM( NLF, LVL2 ), LDU,
     $                      POLES( NLF, LVL2 ), DIFL( NLF, LVL ),
     $                      DIFR( NLF, LVL2 ), Z( NLF, LVL ), K( J ),
     $                      C( J ), S( J ), WORK( NWORK1 ),
     $                      IWORK( IWK ), INFO )
            END IF