
Lines Matching full:alpha

44   Scalar alpha  = *reinterpret_cast<const Scalar*>(palpha);
74 func[code](*m, *n, *k, a, *lda, b, *ldb, c, *ldc, alpha, blocking, 0);
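
The two matches above (source lines 44 and 74) read `alpha` and forward it to the blocked product kernel. Judging by their position near the top of the file they most likely sit inside the ?gemm wrapper, but that attribution is an assumption since the routine name is not part of the match. A minimal, hypothetical Eigen sketch of the operation such a wrapper exposes, c = alpha*a*b + beta*c (op taken as the identity; all names illustrative):

    #include <Eigen/Dense>

    // Hypothetical gemm-like update: c := alpha*a*b + beta*c.
    void gemm_like(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b,
                   Eigen::MatrixXd& c, double alpha, double beta)
    {
      c *= beta;                     // scale the accumulator first
      c.noalias() += alpha * a * b;  // then add the scaled product
    }
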
144 Scalar alpha = *reinterpret_cast<const Scalar*>(palpha);
174 if(alpha!=Scalar(1))
175 matrix(b,*m,*n,*ldb) *= alpha;
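
Source lines 144 and 174-175 load `alpha` and, when it differs from 1, scale the right-hand side `b` in place after the kernel has run. Given their position in the file these lines presumably belong to the triangular-solve (?trsm) path, which overwrites b with alpha*op(a)^{-1}*b; that attribution is an assumption. A hedged sketch of the same idea with Eigen's triangular views (illustrative names, lower-triangular a, side = 'L'):

    #include <Eigen/Dense>

    // Hypothetical trsm-like step: solve in place, then apply alpha only
    // when needed, mirroring the `if(alpha!=Scalar(1))` guard above.
    void trsm_like(const Eigen::MatrixXd& a, Eigen::MatrixXd& b, double alpha)
    {
      a.triangularView<Eigen::Lower>().solveInPlace(b);  // b <- a^{-1} * b
      if (alpha != 1.0)
        b *= alpha;                                      // b <- alpha * b
    }
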
181 // b = alpha*op(a)*b for side = 'L'or'l'
182 // b = alpha*b*op(a) for side = 'R'or'r'
249 alpha = *reinterpret_cast<const Scalar*>(palpha);
275 func[code](*m, *n, *m, a, *lda, tmp.data(), tmp.outerStride(), b, *ldb, alpha, blocking);
280 func[code](*m, *n, *n, tmp.data(), tmp.outerStride(), a, *lda, b, *ldb, alpha, blocking);
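
The comment block above describes the triangular matrix product (?trmm): b is overwritten with alpha*op(a)*b or alpha*b*op(a) depending on `side`, and the calls at source lines 275/280 pass a temporary `tmp` (apparently a copy of b) to the blocked kernel so the update can be formed safely even though the destination is b itself. A hedged sketch of the left-sided case with illustrative names; the fresh result matrix plays the role of `tmp`:

    #include <Eigen/Dense>

    // Hypothetical left-sided trmm-like product: result = alpha * op(a) * b,
    // with op(a) taken as the upper-triangular part of a.
    Eigen::MatrixXd trmm_left(const Eigen::MatrixXd& a,
                              const Eigen::MatrixXd& b, double alpha)
    {
      Eigen::MatrixXd result = alpha * (a.triangularView<Eigen::Upper>() * b);
      return result;
    }
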
285 // c = alpha*a*b + beta*c for side = 'L'or'l'
286 // c = alpha*b*a + beta*c for side = 'R'or'r'
290 // std::cerr << "in symm " << *side << " " << *uplo << " " << *m << "x" << *n << " lda:" << *lda << " ldb:" << *ldb << " ldc:" << *ldc << " alpha:" << *palpha << " beta:" << *pbeta << "\n";
294 Scalar alpha = *reinterpret_cast<const Scalar*>(palpha);
334 matrix(c, *m, *n, *ldc) += alpha * matA * matrix(b, *m, *n, *ldb);
336 matrix(c, *m, *n, *ldc) += alpha * matrix(b, *m, *n, *ldb) * matA;
341 if(UPLO(*uplo)==UP) internal::product_selfadjoint_matrix<Scalar, DenseIndex, RowMajor,true,false, ColMajor,false,false, ColMajor>::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha, blocking);
342 else if(UPLO(*uplo)==LO) internal::product_selfadjoint_matrix<Scalar, DenseIndex, ColMajor,true,false, ColMajor,false,false, ColMajor>::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha, blocking);
345 if(UPLO(*uplo)==UP) internal::product_selfadjoint_matrix<Scalar, DenseIndex, ColMajor,false,false, RowMajor,true,false, ColMajor>::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha, blocking);
346 else if(UPLO(*uplo)==LO) internal::product_selfadjoint_matrix<Scalar, DenseIndex, ColMajor,false,false, ColMajor,true,false, ColMajor>::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha, blocking);
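
The comments at source lines 285-286 state the ?symm contract: a is symmetric, only its `uplo` triangle is referenced, and the product is applied from the left or the right. The fallbacks at lines 334/336 express exactly that with a dense `matA`, while lines 341-346 dispatch to the `internal::product_selfadjoint_matrix` kernel variant matching `side` and `uplo`. A hedged sketch of both sides using Eigen's selfadjointView (illustrative names, uplo = 'U'):

    #include <Eigen/Dense>

    // Hypothetical symm-like updates; only the upper triangle of `a` is read.
    void symm_left(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b,
                   Eigen::MatrixXd& c, double alpha, double beta)
    {
      c *= beta;
      c += alpha * (a.selfadjointView<Eigen::Upper>() * b);   // c += alpha*a*b
    }

    void symm_right(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b,
                    Eigen::MatrixXd& c, double alpha, double beta)
    {
      c *= beta;
      c += alpha * (b * a.selfadjointView<Eigen::Upper>());   // c += alpha*b*a
    }
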
355 // c = alpha*a*a' + beta*c for op = 'N'or'n'
356 // c = alpha*a'*a + beta*c for op = 'T'or't','C'or'c'
383 Scalar alpha = *reinterpret_cast<const Scalar*>(palpha);
414 matrix(c, *n, *n, *ldc).triangularView<Upper>() += alpha * matrix(a,*n,*k,*lda) * matrix(a,*n,*k,*lda).transpose();
416 matrix(c, *n, *n, *ldc).triangularView<Upper>() += alpha * matrix(a,*k,*n,*lda).transpose() * matrix(a,*k,*n,*lda);
421 matrix(c, *n, *n, *ldc).triangularView<Lower>() += alpha * matrix(a,*n,*k,*lda) * matrix(a,*n,*k,*lda).transpose();
423 matrix(c, *n, *n, *ldc).triangularView<Lower>() += alpha * matrix(a,*k,*n,*lda).transpose() * matrix(a,*k,*n,*lda);
429 func[code](*n, *k, a, *lda, a, *lda, c, *ldc, alpha, blocking);
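
These matches implement the ?syrk rank-k update described at source lines 355-356: only the triangle selected by `uplo` is written, which is why the fallbacks at lines 414-423 go through a triangularView before line 429 hands the general case to the blocked kernel. A hedged sketch mirroring the fallback form (illustrative names, op = 'N', uplo = 'U'):

    #include <Eigen/Dense>

    // Hypothetical syrk-like update: c := alpha*a*a^T + beta*c,
    // touching only the upper triangle of c.
    void syrk_upper(const Eigen::MatrixXd& a, Eigen::MatrixXd& c,
                    double alpha, double beta)
    {
      c.triangularView<Eigen::Upper>() *= beta;   // scale only the stored triangle
      c.triangularView<Eigen::Upper>() += alpha * a * a.transpose();
    }
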
435 // c = alpha*a*b' + alpha*b*a' + beta*c for op = 'N'or'n'
436 // c = alpha*a'*b + alpha*b'*a + beta*c for op = 'T'or't'
443 Scalar alpha = *reinterpret_cast<const Scalar*>(palpha);
446 // std::cerr << "in syr2k " << *uplo << " " << *op << " " << *n << " " << *k << " " << alpha << " " << *lda << " " << *ldb << " " << beta << " " << *ldc << "\n";
477 += alpha *matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).transpose()
478 + alpha*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).transpose();
482 += alpha*matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).transpose()
483 + alpha*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).transpose();
489 += alpha*matrix(a, *k, *n, *lda).transpose()*matrix(b, *k, *n, *ldb)
490 + alpha*matrix(b, *k, *n, *ldb).transpose()*matrix(a, *k, *n, *lda);
493 += alpha*matrix(a, *k, *n, *lda).transpose()*matrix(b, *k, *n, *ldb)
494 + alpha*matrix(b, *k, *n, *ldb).transpose()*matrix(a, *k, *n, *lda);
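
The four branches at source lines 477-494 cover the ?syr2k fallback for the (uplo, op) combinations, each adding the symmetric rank-2k update from the comments at lines 435-436 into the selected triangle. A hedged sketch of the op = 'N', uplo = 'U' case (illustrative names):

    #include <Eigen/Dense>

    // Hypothetical syr2k-like update: c := alpha*a*b^T + alpha*b*a^T + beta*c,
    // writing only the upper triangle of c.
    void syr2k_upper(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b,
                     Eigen::MatrixXd& c, double alpha, double beta)
    {
      c.triangularView<Eigen::Upper>() *= beta;
      c.triangularView<Eigen::Upper>() += alpha * a * b.transpose()
                                        + alpha * b * a.transpose();
    }
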
503 // c = alpha*a*b + beta*c for side = 'L'or'l'
504 // c = alpha*b*a + beta*c for side = 'R'or'r'
511 Scalar alpha = *reinterpret_cast<const Scalar*>(palpha);
514 // std::cerr << "in hemm " << *side << " " << *uplo << " " << *m << " " << *n << " " << alpha << " " << *lda << " " << beta << " " << *ldc << "\n";
541 ::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha, blocking);
543 ::run(*m, *n, a, *lda, b, *ldb, c, *ldc, alpha, blocking);
548 if(UPLO(*uplo)==UP) matrix(c,*m,*n,*ldc) += alpha * matrix(b,*m,*n,*ldb) * matrix(a,*n,*n,*lda).selfadjointView<Upper>();/*internal::product_selfadjoint_matrix<Scalar,DenseIndex,ColMajor,false,false, RowMajor,true,Conj, ColMajor>
549 ::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha, blocking);*/
551 ::run(*m, *n, b, *ldb, a, *lda, c, *ldc, alpha, blocking);
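
This is the complex Hermitian analogue of ?symm (?hemm): a is Hermitian, only its `uplo` triangle is referenced, and source line 548 even keeps a dense selfadjointView fallback next to the commented-out kernel call. A hedged sketch of the right-sided case in the same style, with illustrative names and complex scalars:

    #include <Eigen/Dense>
    #include <complex>

    // Hypothetical hemm-like update (side = 'R', uplo = 'U'):
    // c := alpha*b*a + beta*c, where a is Hermitian and only its upper
    // triangle is read.
    void hemm_right(const Eigen::MatrixXcd& a, const Eigen::MatrixXcd& b,
                    Eigen::MatrixXcd& c, std::complex<double> alpha,
                    std::complex<double> beta)
    {
      c *= beta;
      c += alpha * (b * a.selfadjointView<Eigen::Upper>());
    }
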
562 // c = alpha*a*conj(a') + beta*c for op = 'N'or'n'
563 // c = alpha*conj(a')*a + beta*c for op = 'C'or'c'
587 RealScalar alpha = *palpha;
590 // std::cerr << "in herk " << *uplo << " " << *op << " " << *n << " " << *k << " " << alpha << " " << *lda << " " << beta << " " << *ldc << "\n";
620 if(*k>0 && alpha!=RealScalar(0))
623 func[code](*n, *k, a, *lda, a, *lda, c, *ldc, alpha, blocking);
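
For ?herk, note that `alpha` is a real scalar (source line 587) even though the matrices are complex, which is what keeps c Hermitian, and the kernel at line 623 is only invoked when k > 0 and alpha is nonzero (line 620). A hedged sketch of the op = 'N', uplo = 'U' case with illustrative names:

    #include <Eigen/Dense>

    // Hypothetical herk-like update: c := alpha*a*a^H + beta*c, with real
    // alpha/beta, updating only the upper triangle of the Hermitian matrix c.
    void herk_upper(const Eigen::MatrixXcd& a, Eigen::MatrixXcd& c,
                    double alpha, double beta)
    {
      c.triangularView<Eigen::Upper>() *= beta;
      if (a.cols() > 0 && alpha != 0.0)   // mirrors the k>0 && alpha!=0 guard
        c.triangularView<Eigen::Upper>() += alpha * a * a.adjoint();
    }
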
629 // c = alpha*a*conj(b') + conj(alpha)*b*conj(a') + beta*c, for op = 'N'or'n'
630 // c = alpha*conj(a')*b + conj(alpha)*conj(b')*a + beta*c, for op = 'C'or'c'
637 Scalar alpha = *reinterpret_cast<const Scalar*>(palpha);
640 // std::cerr << "in her2k " << *uplo << " " << *op << " " << *n << " " << *k << " " << alpha << " " << *lda << " " << *ldb << " " << beta << " " << *ldc << "\n";
668 else if(*k>0 && alpha!=Scalar(0))
679 += alpha *matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).adjoint()
680 + numext::conj(alpha)*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).adjoint();
684 += alpha*matrix(a, *n, *k, *lda)*matrix(b, *n, *k, *ldb).adjoint()
685 + numext::conj(alpha)*matrix(b, *n, *k, *ldb)*matrix(a, *n, *k, *lda).adjoint();
691 += alpha*matrix(a, *k, *n, *lda).adjoint()*matrix(b, *k, *n, *ldb)
692 + numext::conj(alpha)*matrix(b, *k, *n, *ldb).adjoint()*matrix(a, *k, *n, *lda);
695 += alpha*matrix(a, *k, *n, *lda).adjoint()*matrix(b, *k, *n, *ldb)
696 + numext::conj(alpha)*matrix(b, *k, *n, *ldb).adjoint()*matrix(a, *k, *n, *lda);
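
The ?her2k fallbacks at source lines 679-696 add the Hermitian rank-2k update from the comments at lines 629-630 into the selected triangle; alpha is complex and its conjugate multiplies the second term, which, together with a real beta, keeps c Hermitian. A hedged sketch of the op = 'N', uplo = 'U' branch (illustrative names):

    #include <Eigen/Dense>
    #include <complex>

    // Hypothetical her2k-like update, upper triangle only:
    // c := alpha*a*b^H + conj(alpha)*b*a^H + beta*c, with real beta.
    void her2k_upper(const Eigen::MatrixXcd& a, const Eigen::MatrixXcd& b,
                     Eigen::MatrixXcd& c, std::complex<double> alpha, double beta)
    {
      c.triangularView<Eigen::Upper>() *= beta;
      c.triangularView<Eigen::Upper>() += alpha * a * b.adjoint()
                                        + std::conj(alpha) * b * a.adjoint();
    }
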