
Tags
No tags

Commit MetaInfo

Revision: 59fd279a3a2908655d13e571db8901790fd500d5 (tree)
Date: 2013-10-18 11:10:38
Author: Mikiya Fujii <mikiya.fujii@gmai...>
Committer: Mikiya Fujii

Log Message

trunk r1551 is merged into branches/fx10. A bug in the handling of MolDSException in OpenMP/MPI regions is fixed. #32309 #32094

git-svn-id: https://svn.sourceforge.jp/svnroot/molds/branches/fx10@1554 1136aad2-a195-0410-b898-f5ea1d11b9d8
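The diffs below apply one pattern to eight functions (CalcFockMatrix, CalcGammaAB, CalcCartesianMatrixByGTOExpansion and CalcOverlapAOs in Cndo2.cpp; CalcTwoElecTwoCore in Mndo.cpp; CalcCISMatrix, CalcGammaNRMinusKNRMatrix and CalcKRDagerGammaRInvMatrix in ZindoS.cpp). The per-iteration stringstream ompErrors, which was deserialized and rethrown inside the MPI-parallelized outer loops, is replaced by a single errorStream declared before each loop; asynchronous messages are only posted while errorStream is still empty; and the recorded exception is rethrown only after asyncCommunicator.Finalize() and communicationThread.join(). The sketch below is a minimal, self-contained illustration of that ordering, not the MolDS code itself: computeElement, fillUpperTriangle and the helper thread body are hypothetical, and std::runtime_error plus plain strings stand in for MolDSException::Serialize/Deserialize.

#include <omp.h>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <thread>
#include <vector>

// Placeholder for the per-element kernels; may throw.
double computeElement(int i, int j) {
   if (i == 3 && j == 11) throw std::runtime_error("failure at element (3,11)");
   return static_cast<double>(i + j);
}

void fillUpperTriangle(std::vector<std::vector<double> >& m) {
   const int n = static_cast<int>(m.size());
   std::stringstream errorStream;               // one stream for the whole outer loop
   std::thread helper([] { /* stands in for the asynchronous communication thread */ });
   for (int i = 0; i < n; i++) {                // outer loop (MPI-distributed in MolDS)
#pragma omp parallel for schedule(auto)
      for (int j = i; j < n; j++) {             // inner loop parallelized with OpenMP
         try {
            m[i][j] = computeElement(i, j);
         }
         catch (const std::exception& ex) {
#pragma omp critical
            errorStream << ex.what() << '\n';   // serialize instead of rethrowing here
         }
      }
      if (errorStream.str().empty()) {
         // Asynchronous send/receive requests would only be posted here, mirroring
         // the new if(errorStream.str().empty()) guard in the diffs.
      }
   }
   helper.join();                               // join the helper thread first ...
   if (!errorStream.str().empty()) {            // ... and only then rethrow the error
      throw std::runtime_error(errorStream.str());
   }
}

int main() {
   std::vector<std::vector<double> > m(16, std::vector<double>(16, 0.0));
   try { fillUpperTriangle(m); }
   catch (const std::exception& ex) { std::cerr << ex.what() << std::endl; return 1; }
   return 0;
}

Built with, for example, g++ -std=c++11 -fopenmp. The point is only the ordering: serialize inside the OpenMP region, stop posting further messages once an error has been recorded, join the communication thread, and only then throw.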

Change Summary

Modification

  * src/cndo/Cndo2.cpp
  * src/mndo/Mndo.cpp
  * src/zindo/ZindoS.cpp

--- a/src/cndo/Cndo2.cpp
+++ b/src/cndo/Cndo2.cpp
@@ -1389,24 +1389,23 @@ void Cndo2::CalcFockMatrix(double** fockMatrix,
13891389 bool isGuess) const{
13901390 int totalNumberAOs = molecule.GetTotalNumberAOs();
13911391 int totalNumberAtoms = molecule.GetNumberAtoms();
1392+ MallocerFreer::GetInstance()->Initialize<double>(fockMatrix, totalNumberAOs, totalNumberAOs);
13921393
13931394 // MPI setting of each rank
13941395 int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank();
13951396 int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize();
13961397 int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank();
1398+ stringstream errorStream;
13971399 MolDS_mpi::AsyncCommunicator asyncCommunicator;
1398- boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>,
1399- &asyncCommunicator) );
1400+ boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) );
14001401
1401- MallocerFreer::GetInstance()->Initialize<double>(fockMatrix, totalNumberAOs, totalNumberAOs);
1402- for(int A=totalNumberAtoms-1; 0<=A; A--){
1402+ for(int A=0; A<totalNumberAtoms; A++){
14031403 const Atom& atomA = *molecule.GetAtom(A);
14041404 int firstAOIndexA = atomA.GetFirstAOIndex();
14051405 int lastAOIndexA = atomA.GetLastAOIndex();
14061406 for(int mu=firstAOIndexA; mu<=lastAOIndexA; mu++){
14071407 int calcRank = mu%mpiSize;
14081408 if(mpiRank == calcRank){
1409- stringstream ompErrors;
14101409 #pragma omp parallel for schedule(auto)
14111410 for(int B=A; B<totalNumberAtoms; B++){
14121411 try{
@@ -1444,36 +1443,34 @@ void Cndo2::CalcFockMatrix(double** fockMatrix,
14441443 else{
14451444 // lower left part (not calculated)
14461445 }
1447- } // end of loop nu
1448- } // end of try
1446+ }
1447+ }
14491448 catch(MolDSException ex){
1450-#pragma omp critical
1451- ex.Serialize(ompErrors);
1449+#pragma omp critical
1450+ ex.Serialize(errorStream);
14521451 }
1453- } // end of loop B parallelized with openMP
1454- // Exception throwing for omp-region
1455- if(!ompErrors.str().empty()){
1456- throw MolDSException::Deserialize(ompErrors);
14571452 }
1458- } // end of if(mpiRank == calcRank)
1459-
1460- // set data to gather in mpiHeadRank with asynchronous MPI
1461- int tag = mu;
1462- int source = calcRank;
1463- int dest = mpiHeadRank;
1464- double* buff = &fockMatrix[mu][mu];
1465- MolDS_mpi::molds_mpi_int num = totalNumberAOs-mu;
1466- if(mpiRank == mpiHeadRank && mpiRank != calcRank){
1467- asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
14681453 }
1469- if(mpiRank != mpiHeadRank && mpiRank == calcRank){
1470- asyncCommunicator.SetSentMessage(buff, num, dest, tag);
1454+ if(errorStream.str().empty()){
1455+ int tag = mu;
1456+ int source = calcRank;
1457+ int dest = mpiHeadRank;
1458+ double* buff = &fockMatrix[mu][mu];
1459+ MolDS_mpi::molds_mpi_int num = totalNumberAOs-mu;
1460+ if(mpiRank == mpiHeadRank && mpiRank != calcRank){
1461+ asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
1462+ }
1463+ if(mpiRank != mpiHeadRank && mpiRank == calcRank){
1464+ asyncCommunicator.SetSentMessage(buff, num, dest, tag);
1465+ }
14711466 }
1472- } // end of loop mu parallelized with MPI
1473- } // end of loop A
1474- // Delete the communication thread.
1467+ }
1468+ }
14751469 asyncCommunicator.Finalize();
14761470 communicationThread.join();
1471+ if(!errorStream.str().empty()){
1472+ throw MolDSException::Deserialize(errorStream);
1473+ }
14771474 double* buff = &fockMatrix[0][0];
14781475 MolDS_mpi::molds_mpi_int num = totalNumberAOs*totalNumberAOs;
14791476 MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank);
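In CalcFockMatrix, row mu is computed by rank mu % mpiSize; the computing rank sends the row tail &fockMatrix[mu][mu] (totalNumberAOs - mu doubles) to the head rank, which posts the matching receive, and the assembled matrix is finally broadcast to all ranks. Below is a rough synchronous equivalent in plain MPI, with hypothetical names, assuming a contiguously stored n x n matrix and omitting the computation itself; MolDS instead posts these messages through AsyncCommunicator on a separate boost::thread so that communication overlaps with the computation.

#include <mpi.h>
#include <vector>

// Round-robin row distribution with gather-to-head and a final broadcast.
// Synchronous sketch only; the per-row computation is omitted.
void gatherUpperTriangle(std::vector<double>& matrix, int n, MPI_Comm comm) {
   int rank = 0, size = 0;
   const int headRank = 0;                         // stands in for MpiProcess::GetHeadRank()
   MPI_Comm_rank(comm, &rank);
   MPI_Comm_size(comm, &size);
   for (int mu = 0; mu < n; mu++) {
      int calcRank = mu % size;                    // rank that computed row mu
      int tag      = mu;                           // one tag per row, as in the diff
      double* buff = &matrix[mu * (n + 1)];        // row tail starting at the diagonal
      int num      = n - mu;
      if (rank == headRank && rank != calcRank) {
         MPI_Recv(buff, num, MPI_DOUBLE, calcRank, tag, comm, MPI_STATUS_IGNORE);
      }
      if (rank != headRank && rank == calcRank) {
         MPI_Send(buff, num, MPI_DOUBLE, headRank, tag, comm);
      }
   }
   MPI_Bcast(&matrix[0], n * n, MPI_DOUBLE, headRank, comm);   // everyone gets the result
}

Here headRank = 0 is only an assumption; in the diff it comes from MpiProcess::GetInstance()->GetHeadRank().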
@@ -1609,11 +1606,10 @@ void Cndo2::CalcGammaAB(double** gammaAB, const Molecule& molecule) const{
16091606 int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank();
16101607 int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize();
16111608 int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank();
1609+ stringstream errorStream;
16121610 MolDS_mpi::AsyncCommunicator asyncCommunicator;
1613- boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>,
1614- &asyncCommunicator) );
1611+ boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) );
16151612
1616- // This loop (A) is parallelized by MPI
16171613 for(int A=0; A<totalAtomNumber; A++){
16181614 int calcRank = A%mpiSize;
16191615 if(mpiRank == calcRank){
@@ -1621,7 +1617,6 @@ void Cndo2::CalcGammaAB(double** gammaAB, const Molecule& molecule) const{
16211617 int na = atomA.GetValenceShellType() + 1;
16221618 double orbitalExponentA = atomA.GetOrbitalExponent(
16231619 atomA.GetValenceShellType(), s, this->theory);
1624- stringstream ompErrors;
16251620 #pragma omp parallel for schedule(auto)
16261621 for(int B=A; B<totalAtomNumber; B++){
16271622 try{
@@ -1675,30 +1670,29 @@ void Cndo2::CalcGammaAB(double** gammaAB, const Molecule& molecule) const{
16751670 }
16761671 catch(MolDSException ex){
16771672 #pragma omp critical
1678- ex.Serialize(ompErrors);
1673+ ex.Serialize(errorStream);
16791674 }
1680- } // end of loop B parallelized by openMP
1681- // Exception throwing for omp-region
1682- if(!ompErrors.str().empty()){
1683- throw MolDSException::Deserialize(ompErrors);
16841675 }
1685- } // end of if(mpiRank==calcRank)
1686-
1687- // set data to gater in mpiHeadRank with asynchronous MPI
1688- int tag = A;
1689- int source = calcRank;
1690- int dest = mpiHeadRank;
1691- double* buff = &gammaAB[A][A];
1692- MolDS_mpi::molds_mpi_int num = totalAtomNumber-A;
1693- if(mpiRank == mpiHeadRank && mpiRank != calcRank){
1694- asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
16951676 }
1696- if(mpiRank != mpiHeadRank && mpiRank == calcRank){
1697- asyncCommunicator.SetSentMessage(buff, num, dest, tag);
1677+ if(errorStream.str().empty()){
1678+ int tag = A;
1679+ int source = calcRank;
1680+ int dest = mpiHeadRank;
1681+ double* buff = &gammaAB[A][A];
1682+ MolDS_mpi::molds_mpi_int num = totalAtomNumber-A;
1683+ if(mpiRank == mpiHeadRank && mpiRank != calcRank){
1684+ asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
1685+ }
1686+ if(mpiRank != mpiHeadRank && mpiRank == calcRank){
1687+ asyncCommunicator.SetSentMessage(buff, num, dest, tag);
1688+ }
16981689 }
1699- } // end of loop A prallelized by MPI
1690+ }
17001691 asyncCommunicator.Finalize();
17011692 communicationThread.join();
1693+ if(!errorStream.str().empty()){
1694+ throw MolDSException::Deserialize(errorStream);
1695+ }
17021696 double* buff = &gammaAB[0][0];
17031697 MolDS_mpi::molds_mpi_int num = totalAtomNumber*totalAtomNumber;
17041698 MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank);
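Both here and in CalcFockMatrix the final broadcast passes &gammaAB[0][0] (or &fockMatrix[0][0]) with a length of n*n, and row tails such as &gammaAB[A][A] travel as single messages. That requires the double** matrices to be backed by one contiguous block behind the row pointers, which is presumably how MallocerFreer lays them out. A minimal sketch of such a layout, with hypothetical names:

#include <cstddef>
#include <vector>

// A double** view over one contiguous block, so that &m[0][0] spans nRows*nCols
// doubles and &m[r][c] can start a contiguous message buffer.
struct Contiguous2d {
   std::vector<double>  storage;
   std::vector<double*> rows;
   Contiguous2d(std::size_t nRows, std::size_t nCols)
      : storage(nRows * nCols, 0.0), rows(nRows) {
      for (std::size_t r = 0; r < nRows; r++) {
         rows[r] = &storage[r * nCols];
      }
   }
   double** data() { return rows.empty() ? NULL : &rows[0]; }   // usable as m[r][c]
};

With this layout, Broadcast(&m[0][0], nRows*nCols, root) and a per-row send of &m[r][r] with nCols - r elements both address valid contiguous memory.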
@@ -1807,11 +1801,10 @@ void Cndo2::CalcCartesianMatrixByGTOExpansion(double*** cartesianMatrix,
18071801 int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank();
18081802 int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize();
18091803 int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank();
1804+ stringstream errorStream;
18101805 MolDS_mpi::AsyncCommunicator asyncCommunicator;
1811- boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>,
1812- &asyncCommunicator) );
1806+ boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) );
18131807
1814- // This loop (A and mu) is parallelized by MPI
18151808 for(int A=0; A<totalAtomNumber; A++){
18161809 const Atom& atomA = *molecule.GetAtom(A);
18171810 int firstAOIndexA = atomA.GetFirstAOIndex();
@@ -1820,7 +1813,6 @@ void Cndo2::CalcCartesianMatrixByGTOExpansion(double*** cartesianMatrix,
18201813 if(mpiRank == calcRank){
18211814 for(int a=0; a<numValenceAOsA; a++){
18221815 int mu = firstAOIndexA + a;
1823- stringstream ompErrors;
18241816 #pragma omp parallel for schedule(auto)
18251817 for(int B=0; B<totalAtomNumber; B++){
18261818 try{
@@ -1837,40 +1829,38 @@ void Cndo2::CalcCartesianMatrixByGTOExpansion(double*** cartesianMatrix,
18371829 }
18381830 catch(MolDSException ex){
18391831 #pragma omp critical
1840- ex.Serialize(ompErrors);
1832+ ex.Serialize(errorStream);
18411833 }
1842- }// end of loop for int B with openMP
1843- // Exception throwing for omp-region
1844- if(!ompErrors.str().empty()){
1845- throw MolDSException::Deserialize(ompErrors);
1846- }
1847- }
1848- } // end lof if(mpiRank == calcRank)
1849-
1850- // set data to gater in mpiHeadRank with asynchronous MPI
1851- int tagX = A* CartesianType_end + XAxis;
1852- int tagY = A* CartesianType_end + YAxis;
1853- int tagZ = A* CartesianType_end + ZAxis;
1854- int source = calcRank;
1855- int dest = mpiHeadRank;
1856- double* buffX = &cartesianMatrix[XAxis][firstAOIndexA][0];
1857- double* buffY = &cartesianMatrix[YAxis][firstAOIndexA][0];
1858- double* buffZ = &cartesianMatrix[ZAxis][firstAOIndexA][0];
1859- MolDS_mpi::molds_mpi_int num = numValenceAOsA*totalAONumber;
1860- if(mpiRank == mpiHeadRank && mpiRank != calcRank){
1861- asyncCommunicator.SetRecvedMessage(buffX, num, source, tagX);
1862- asyncCommunicator.SetRecvedMessage(buffY, num, source, tagY);
1863- asyncCommunicator.SetRecvedMessage(buffZ, num, source, tagZ);
1864- }
1865- if(mpiRank != mpiHeadRank && mpiRank == calcRank){
1866- asyncCommunicator.SetSentMessage(buffX, num, dest, tagX);
1867- asyncCommunicator.SetSentMessage(buffY, num, dest, tagY);
1868- asyncCommunicator.SetSentMessage(buffZ, num, dest, tagZ);
1869- }
1870- } // end of loop for int A with MPI
1871- // Delete the communication thread.
1834+ }
1835+ }
1836+ }
1837+ if(errorStream.str().empty()){
1838+ int tagX = A* CartesianType_end + XAxis;
1839+ int tagY = A* CartesianType_end + YAxis;
1840+ int tagZ = A* CartesianType_end + ZAxis;
1841+ int source = calcRank;
1842+ int dest = mpiHeadRank;
1843+ double* buffX = &cartesianMatrix[XAxis][firstAOIndexA][0];
1844+ double* buffY = &cartesianMatrix[YAxis][firstAOIndexA][0];
1845+ double* buffZ = &cartesianMatrix[ZAxis][firstAOIndexA][0];
1846+ MolDS_mpi::molds_mpi_int num = numValenceAOsA*totalAONumber;
1847+ if(mpiRank == mpiHeadRank && mpiRank != calcRank){
1848+ asyncCommunicator.SetRecvedMessage(buffX, num, source, tagX);
1849+ asyncCommunicator.SetRecvedMessage(buffY, num, source, tagY);
1850+ asyncCommunicator.SetRecvedMessage(buffZ, num, source, tagZ);
1851+ }
1852+ if(mpiRank != mpiHeadRank && mpiRank == calcRank){
1853+ asyncCommunicator.SetSentMessage(buffX, num, dest, tagX);
1854+ asyncCommunicator.SetSentMessage(buffY, num, dest, tagY);
1855+ asyncCommunicator.SetSentMessage(buffZ, num, dest, tagZ);
1856+ }
1857+ }
1858+ }
18721859 asyncCommunicator.Finalize();
18731860 communicationThread.join();
1861+ if(!errorStream.str().empty()){
1862+ throw MolDSException::Deserialize(errorStream);
1863+ }
18741864 double* buff = &cartesianMatrix[0][0][0];
18751865 MolDS_mpi::molds_mpi_int num = CartesianType_end*totalAONumber*totalAONumber;
18761866 MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank);
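CalcCartesianMatrixByGTOExpansion gathers three blocks per atom (the x, y and z components), so each message tag encodes both the atom index and the axis, tag = A * CartesianType_end + axis, with numValenceAOsA * totalAONumber doubles per block. A tiny illustration of that tag scheme; the enumerator values below are assumptions chosen only to match the names used in the diff:

// Tags must be unique per (atom, axis) pair so the three per-atom messages stay distinct.
enum CartesianType { XAxis = 0, YAxis = 1, ZAxis = 2, CartesianType_end = 3 };

inline int cartesianMessageTag(int atomIndex, CartesianType axis) {
   return atomIndex * CartesianType_end + axis;   // e.g. atom 5 yields tags 15, 16, 17
}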
@@ -3904,27 +3894,22 @@ void Cndo2::CalcOverlapESsWithAnotherElectronicStructure(double** overlapESs,
39043894 void Cndo2::CalcOverlapAOs(double** overlapAOs, const Molecule& molecule) const{
39053895 int totalAONumber = molecule.GetTotalNumberAOs();
39063896 int totalAtomNumber = molecule.GetNumberAtoms();
3897+ MallocerFreer::GetInstance()->Initialize<double>(overlapAOs, totalAONumber, totalAONumber);
39073898
39083899 // MPI setting of each rank
39093900 int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank();
39103901 int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize();
39113902 int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank();
3903+ stringstream errorStream;
39123904 MolDS_mpi::AsyncCommunicator asyncCommunicator;
3913- boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>,
3914- &asyncCommunicator) );
3915-
3916- MallocerFreer::GetInstance()->Initialize<double>(overlapAOs,
3917- totalAONumber,
3918- totalAONumber);
3905+ boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) );
39193906
3920- // This loop A is parallelized with MPI
3921- for(int A=totalAtomNumber-1; 0<=A; A--){
3907+ for(int A=0; A<totalAtomNumber; A++){
39223908 const Atom& atomA = *molecule.GetAtom(A);
39233909 int firstAOIndexA = atomA.GetFirstAOIndex();
39243910 int numValenceAOs = atomA.GetValenceSize();
39253911 int calcRank = A%mpiSize;
39263912 if(mpiRank == calcRank){
3927- stringstream ompErrors;
39283913 #pragma omp parallel
39293914 {
39303915 double** diatomicOverlapAOs = NULL;
@@ -3959,12 +3944,11 @@ void Cndo2::CalcOverlapAOs(double** overlapAOs, const Molecule& molecule) const{
39593944 this->CalcRotatingMatrix(rotatingMatrix, atomA, atomB);
39603945 this->RotateDiatmicOverlapAOsToSpaceFrame(diatomicOverlapAOs, rotatingMatrix, tmpDiatomicOverlapAOs, tmpOldDiatomicOverlapAOs, tmpMatrixBC, tmpVectorBC);
39613946 this->SetOverlapAOsElement(overlapAOs, diatomicOverlapAOs, atomA, atomB, symmetrize);
3962- } // end of loop B parallelized with openMP
3963-
3964- } // end of try
3947+ }
3948+ }
39653949 catch(MolDSException ex){
39663950 #pragma omp critical
3967- ex.Serialize(ompErrors);
3951+ ex.Serialize(errorStream);
39683952 }
39693953 this->FreeDiatomicOverlapAOsAndRotatingMatrix(&diatomicOverlapAOs, &rotatingMatrix);
39703954 MallocerFreer::GetInstance()->Free<double>(&tmpDiatomicOverlapAOs,
@@ -3977,28 +3961,27 @@ void Cndo2::CalcOverlapAOs(double** overlapAOs, const Molecule& molecule) const{
39773961 OrbitalType_end);
39783962 MallocerFreer::GetInstance()->Free<double>(&tmpVectorBC,
39793963 OrbitalType_end*OrbitalType_end);
3980- } // end of omp-parallelized region
3981- // Exception throwing for omp-region
3982- if(!ompErrors.str().empty()){
3983- throw MolDSException::Deserialize(ompErrors);
39843964 }
3985- } // end of if(mpiRank == calcRnak)
3986-
3987- // set data to gather in mpiHeadRank with asynchronous MPI
3988- int tag = A;
3989- int source = calcRank;
3990- int dest = mpiHeadRank;
3991- double* buff = overlapAOs[firstAOIndexA];
3992- MolDS_mpi::molds_mpi_int num = totalAONumber*numValenceAOs;
3993- if(mpiRank == mpiHeadRank && mpiRank != calcRank){
3994- asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
3995- }
3996- if(mpiRank != mpiHeadRank && mpiRank == calcRank){
3997- asyncCommunicator.SetSentMessage(buff, num, dest, tag);
3965+ }
3966+ if(errorStream.str().empty()){
3967+ int tag = A;
3968+ int source = calcRank;
3969+ int dest = mpiHeadRank;
3970+ double* buff = overlapAOs[firstAOIndexA];
3971+ MolDS_mpi::molds_mpi_int num = totalAONumber*numValenceAOs;
3972+ if(mpiRank == mpiHeadRank && mpiRank != calcRank){
3973+ asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
3974+ }
3975+ if(mpiRank != mpiHeadRank && mpiRank == calcRank){
3976+ asyncCommunicator.SetSentMessage(buff, num, dest, tag);
3977+ }
39983978 }
3999- } // end of loop A parallelized with MPI
3979+ }
40003980 asyncCommunicator.Finalize();
40013981 communicationThread.join();
3982+ if(!errorStream.str().empty()){
3983+ throw MolDSException::Deserialize(errorStream);
3984+ }
40023985 double* buff = &overlapAOs[0][0];
40033986 MolDS_mpi::molds_mpi_int num = totalAONumber*totalAONumber;
40043987 MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank);
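In CalcOverlapAOs the loop body runs inside #pragma omp parallel: each thread allocates its own diatomicOverlapAOs, rotatingMatrix and tmp* buffers, serializes any MolDSException into errorStream under #pragma omp critical, and frees its buffers before leaving the region, so the cleanup runs whether or not an error was recorded. A rough sketch of that per-thread-workspace shape, using RAII vectors instead of MallocerFreer, a placeholder kernel and buffer size, and with the try/catch moved inside the loop body:

#include <omp.h>
#include <sstream>
#include <stdexcept>
#include <vector>

// Per-thread scratch buffers inside an OpenMP region: allocate once per thread,
// serialize failures into the shared stream, release the scratch on the way out.
void overlapLikeLoop(int nAtomsB, std::stringstream& errorStream) {
#pragma omp parallel
   {
      std::vector<double> diatomicBlock(9 * 9, 0.0);   // per-thread workspace (placeholder size)
#pragma omp for schedule(auto)
      for (int B = 0; B < nAtomsB; B++) {
         try {
            if (B == 13) throw std::runtime_error("diatomic overlap block failed");
            diatomicBlock[0] = static_cast<double>(B); // placeholder for the real kernel
         }
         catch (const std::exception& ex) {
#pragma omp critical
            errorStream << ex.what() << '\n';
         }
      }
      // MolDS calls MallocerFreer::Free<double>(...) here; the vector's destructor
      // plays that role in this sketch, running even when an error was recorded.
   }
}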
--- a/src/mndo/Mndo.cpp
+++ b/src/mndo/Mndo.cpp
@@ -3487,30 +3487,28 @@ double Mndo::GetAuxiliaryKNRKRElement(int moI, int moJ, int moK, int moL) const{
34873487
34883488 void Mndo::CalcTwoElecTwoCore(double****** twoElecTwoCore,
34893489 const Molecule& molecule) const{
3490- int totalNumberAtoms = molecule.GetNumberAtoms();
3491-
3492- // MPI setting of each rank
3493- int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank();
3494- int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize();
3495- int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank();
3496- MolDS_mpi::AsyncCommunicator asyncCommunicator;
3497- boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>,
3498- &asyncCommunicator) );
34993490 #ifdef MOLDS_DBG
35003491 if(twoElecTwoCore == NULL){
35013492 throw MolDSException(this->errorMessageCalcTwoElecTwoCoreNullMatrix);
35023493 }
35033494 #endif
3495+ int totalNumberAtoms = molecule.GetNumberAtoms();
35043496 MallocerFreer::GetInstance()->Initialize<double>(twoElecTwoCore,
35053497 totalNumberAtoms,
35063498 totalNumberAtoms,
35073499 dxy, dxy, dxy, dxy);
35083500
3509- // this loop-a is MPI-parallelized
3510- for(int a=totalNumberAtoms-1; 0<=a; a--){
3501+ // MPI setting of each rank
3502+ int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank();
3503+ int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize();
3504+ int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank();
3505+ stringstream errorStream;
3506+ MolDS_mpi::AsyncCommunicator asyncCommunicator;
3507+ boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) );
3508+
3509+ for(int a=0; a<totalNumberAtoms; a++){
35113510 int calcRank = a%mpiSize;
35123511 if(mpiRank == calcRank){
3513- stringstream ompErrors;
35143512 #pragma omp parallel
35153513 {
35163514 double**** diatomicTwoElecTwoCore = NULL;
@@ -3533,14 +3531,12 @@ void Mndo::CalcTwoElecTwoCore(double****** twoElecTwoCore,
35333531 tmpMatrixBC,
35343532 tmpVectorBC,
35353533 a, b);
3536-
35373534 int i=0;
35383535 for(int mu=0; mu<dxy; mu++){
35393536 for(int nu=mu; nu<dxy; nu++){
35403537 int j=0;
35413538 for(int lambda=0; lambda<dxy; lambda++){
35423539 for(int sigma=lambda; sigma<dxy; sigma++){
3543- //double value = diatomicTwoElecTwoCore[mu][nu][lambda][sigma];
35443540 this->twoElecTwoCoreMpiBuff[a][b][i][j]
35453541 = diatomicTwoElecTwoCore[mu][nu][lambda][sigma];
35463542 j++;
@@ -3549,36 +3545,34 @@ void Mndo::CalcTwoElecTwoCore(double****** twoElecTwoCore,
35493545 i++;
35503546 }
35513547 }
3552-
3553- } // end of loop b parallelized with MPI
3554-
3555- } // end of try
3548+ }
3549+ }
35563550 catch(MolDSException ex){
35573551 #pragma omp critical
3558- ex.Serialize(ompErrors);
3552+ ex.Serialize(errorStream);
35593553 }
35603554 MallocerFreer::GetInstance()->Free<double>(&diatomicTwoElecTwoCore, dxy, dxy, dxy, dxy);
35613555 MallocerFreer::GetInstance()->Free<double>(&tmpDiatomicTwoElecTwoCore, dxy*dxy*dxy*dxy);
35623556 MallocerFreer::GetInstance()->Free<double>(&tmpRotMat, OrbitalType_end, OrbitalType_end);
35633557 MallocerFreer::GetInstance()->Free<double>(&tmpMatrixBC, dxy*dxy, dxy*dxy);
35643558 MallocerFreer::GetInstance()->Free<double>(&tmpVectorBC, dxy*dxy*dxy*dxy);
3565- } // end of omp-parallelized region
3566- // Exception throwing for omp-region
3567- if(!ompErrors.str().empty()){
3568- throw MolDSException::Deserialize(ompErrors);
35693559 }
3570- } // end of if(mpiRnak == calcRank)
3571- // set data to gather in mpiHeadRank with asynchronous MPI
3572- if(a<totalNumberAtoms-1){
3573- int b = a+1;
3574- OrbitalType twoElecLimit = dxy;
3575- int numBuff = (twoElecLimit+1)*twoElecLimit/2;
3576- int num = (totalNumberAtoms-b)*numBuff*numBuff;
3577- asyncCommunicator.SetBroadcastedMessage(&this->twoElecTwoCoreMpiBuff[a][b][0][0], num, calcRank);
35783560 }
3579- } // end of loop a parallelized with MPI
3561+ if(errorStream.str().empty()){
3562+ if(a<totalNumberAtoms-1){
3563+ int b = a+1;
3564+ OrbitalType twoElecLimit = dxy;
3565+ int numBuff = (twoElecLimit+1)*twoElecLimit/2;
3566+ int num = (totalNumberAtoms-b)*numBuff*numBuff;
3567+ asyncCommunicator.SetBroadcastedMessage(&this->twoElecTwoCoreMpiBuff[a][b][0][0], num, calcRank);
3568+ }
3569+ }
3570+ }
35803571 asyncCommunicator.Finalize();
35813572 communicationThread.join();
3573+ if(!errorStream.str().empty()){
3574+ throw MolDSException::Deserialize(errorStream);
3575+ }
35823576
35833577 #pragma omp parallel for schedule(auto)
35843578 for(int a=0; a<totalNumberAtoms; a++){
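CalcTwoElecTwoCore broadcasts, for each pair of atoms, only the symmetric (mu,nu) and (lambda,sigma) index pairs of the dxy x dxy x dxy x dxy block, packed into twoElecTwoCoreMpiBuff: numBuff = (dxy+1)*dxy/2 pairs per index, hence numBuff*numBuff doubles per atom pair. A standalone version of that packing which mirrors the i/j counters in the hunk above; the flat row-major tensor and the function name are placeholders:

#include <cstddef>
#include <vector>

// Pack the upper triangles of the (mu,nu) and (lambda,sigma) index pairs of a
// limit^4 tensor into a numBuff x numBuff buffer; limit plays the role of dxy and
// tensor is a flat row-major array of limit^4 doubles indexed [mu][nu][lambda][sigma].
std::vector<double> packSymmetricPairs(const std::vector<double>& tensor, int limit) {
   const int numBuff = (limit + 1) * limit / 2;        // pairs (p,q) with q >= p
   std::vector<double> buff(static_cast<std::size_t>(numBuff) * numBuff, 0.0);
   int i = 0;
   for (int mu = 0; mu < limit; mu++) {
      for (int nu = mu; nu < limit; nu++) {
         int j = 0;
         for (int lambda = 0; lambda < limit; lambda++) {
            for (int sigma = lambda; sigma < limit; sigma++) {
               std::size_t flat = ((static_cast<std::size_t>(mu) * limit + nu) * limit + lambda) * limit + sigma;
               buff[static_cast<std::size_t>(i) * numBuff + j] = tensor[flat];
               j++;
            }
         }
         i++;
      }
   }
   return buff;
}

The loop over all atoms that follows communicationThread.join() in the hunk above presumably unpacks these pairs back into twoElecTwoCore using the same symmetry.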
--- a/src/zindo/ZindoS.cpp
+++ b/src/zindo/ZindoS.cpp
@@ -2353,18 +2353,16 @@ void ZindoS::CalcCISMatrix(double** matrixCIS) const{
23532353 int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank();
23542354 int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize();
23552355 int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank();
2356+ stringstream errorStream;
23562357 MolDS_mpi::AsyncCommunicator asyncCommunicator;
2357- boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>,
2358- &asyncCommunicator) );
2358+ boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) );
23592359
2360- // this loop-a is MPI-parallelized
2361- for(int k=this->matrixCISdimension-1; 0<=k; k--){
2360+ for(int k=0; k<this->matrixCISdimension; k++){
23622361 int calcRank = k%mpiSize;
23632362 if(calcRank == mpiRank){
23642363 // single excitation from I-th (occupied)MO to A-th (virtual)MO
23652364 int moI = this->GetActiveOccIndex(*this->molecule, k);
23662365 int moA = this->GetActiveVirIndex(*this->molecule, k);
2367- stringstream ompErrors;
23682366 #pragma omp parallel for schedule(auto)
23692367 for(int l=k; l<this->matrixCISdimension; l++){
23702368 try{
@@ -2401,30 +2399,29 @@ void ZindoS::CalcCISMatrix(double** matrixCIS) const{
24012399 }
24022400 catch(MolDSException ex){
24032401 #pragma omp critical
2404- ex.Serialize(ompErrors);
2402+ ex.Serialize(errorStream);
24052403 }
2406- } // end of l-loop
2407- // Exception throwing for omp-region
2408- if(!ompErrors.str().empty()){
2409- throw MolDSException::Deserialize(ompErrors);
24102404 }
2411- } // end of if(calcRank == mpiRank)
2412- // Send data to head rank
2413- int tag = k;
2414- int source = calcRank;
2415- int dest = mpiHeadRank;
2416- int num = this->matrixCISdimension - k;
2417- double* buff = &this->matrixCIS[k][k];
2418- if(mpiRank == mpiHeadRank && mpiRank != calcRank){
2419- asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
24202405 }
2421- if(mpiRank != mpiHeadRank && mpiRank == calcRank){
2422- asyncCommunicator.SetSentMessage(buff, num, dest, tag);
2406+ if(errorStream.str().empty()){
2407+ int tag = k;
2408+ int source = calcRank;
2409+ int dest = mpiHeadRank;
2410+ int num = this->matrixCISdimension - k;
2411+ double* buff = &this->matrixCIS[k][k];
2412+ if(mpiRank == mpiHeadRank && mpiRank != calcRank){
2413+ asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
2414+ }
2415+ if(mpiRank != mpiHeadRank && mpiRank == calcRank){
2416+ asyncCommunicator.SetSentMessage(buff, num, dest, tag);
2417+ }
24232418 }
2424- } // end of k-loop which is MPI-parallelized
2419+ }
24252420 asyncCommunicator.Finalize();
24262421 communicationThread.join();
2427- // Broadcast data to all rank
2422+ if(!errorStream.str().empty()){
2423+ throw MolDSException::Deserialize(errorStream);
2424+ }
24282425 for(int k=0; k<this->matrixCISdimension; k++){
24292426 int num = this->matrixCISdimension - k;
24302427 double* buff = &this->matrixCIS[k][k];
@@ -3334,20 +3331,19 @@ void ZindoS::CalcAuxiliaryVector(double* y,
33343331 // Note taht K_{NR} is not calculated.
33353332 void ZindoS::CalcGammaNRMinusKNRMatrix(double** gammaNRMinusKNR, const vector<MoIndexPair>& nonRedundantQIndeces) const{
33363333 int nonRedundantQIndecesSize = nonRedundantQIndeces.size();
3337- //MPI setting of each rank
3334+ // MPI setting of each rank
33383335 int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank();
33393336 int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize();
33403337 int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank();
3338+ stringstream errorStream;
33413339 MolDS_mpi::AsyncCommunicator asyncCommunicator;
3342- boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>,
3343- &asyncCommunicator) );
3344- // this loop-i is MPI-parallelized
3345- for(int i=nonRedundantQIndecesSize-1; 0<=i; i--){
3340+ boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) );
3341+
3342+ for(int i=0; i<nonRedundantQIndecesSize; i++){
33463343 int calcRank = i%mpiSize;
33473344 if(mpiRank == calcRank){
33483345 int moI = nonRedundantQIndeces[i].moI;
33493346 int moJ = nonRedundantQIndeces[i].moJ;
3350- stringstream ompErrors;
33513347 #pragma omp parallel for schedule(auto)
33523348 for(int j=i; j<nonRedundantQIndecesSize; j++){
33533349 try{
@@ -3358,30 +3354,29 @@ void ZindoS::CalcGammaNRMinusKNRMatrix(double** gammaNRMinusKNR, const vector<Mo
33583354 } // end of try
33593355 catch(MolDSException ex){
33603356 #pragma omp critical
3361- ex.Serialize(ompErrors);
3357+ ex.Serialize(errorStream);
33623358 }
3363- } //end of loop j parallelized with openMP
3364- // Exception throwing for omp-region
3365- if(!ompErrors.str().empty()){
3366- throw MolDSException::Deserialize(ompErrors);
3367- }
3368- } /// end of if(mpiRnak == calcRank)
3369- // Send data to head rank
3370- int tag = i;
3371- int source = calcRank;
3372- int dest = mpiHeadRank;
3373- int num = nonRedundantQIndecesSize - i;
3374- double* buff = &gammaNRMinusKNR[i][i];
3375- if(mpiRank == mpiHeadRank && mpiRank != calcRank){
3376- asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
3359+ }
33773360 }
3378- if(mpiRank != mpiHeadRank && mpiRank == calcRank){
3379- asyncCommunicator.SetSentMessage(buff, num, dest, tag);
3361+ if(errorStream.str().empty()){
3362+ int tag = i;
3363+ int source = calcRank;
3364+ int dest = mpiHeadRank;
3365+ int num = nonRedundantQIndecesSize - i;
3366+ double* buff = &gammaNRMinusKNR[i][i];
3367+ if(mpiRank == mpiHeadRank && mpiRank != calcRank){
3368+ asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
3369+ }
3370+ if(mpiRank != mpiHeadRank && mpiRank == calcRank){
3371+ asyncCommunicator.SetSentMessage(buff, num, dest, tag);
3372+ }
33803373 }
3381- } // end of loop-i parallelized with MPI
3374+ }
33823375 asyncCommunicator.Finalize();
33833376 communicationThread.join();
3384- // broadcast data to all rank
3377+ if(!errorStream.str().empty()){
3378+ throw MolDSException::Deserialize(errorStream);
3379+ }
33853380 for(int i=0; i<nonRedundantQIndecesSize; i++){
33863381 int num = nonRedundantQIndecesSize - i;
33873382 double* buff = &gammaNRMinusKNR[i][i];
@@ -3397,20 +3392,19 @@ void ZindoS::CalcKRDagerGammaRInvMatrix(double** kRDagerGammaRInv,
33973392 const vector<MoIndexPair>& redundantQIndeces) const{
33983393 int nonRedundantQIndecesSize = nonRedundantQIndeces.size();
33993394 int redundantQIndecesSize = redundantQIndeces.size();
3400- //MPI setting of each rank
3395+ // MPI setting of each rank
34013396 int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank();
34023397 int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize();
34033398 int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank();
3399+ stringstream errorStream;
34043400 MolDS_mpi::AsyncCommunicator asyncCommunicator;
3405- boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>,
3406- &asyncCommunicator) );
3407- // this loop-i is MPI-parallelized
3401+ boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) );
3402+
34083403 for(int i=0; i<nonRedundantQIndecesSize; i++){
34093404 int calcRank = i%mpiSize;
34103405 if(mpiRank == calcRank){
34113406 int moI = nonRedundantQIndeces[i].moI;
34123407 int moJ = nonRedundantQIndeces[i].moJ;
3413- stringstream ompErrors;
34143408 #pragma omp parallel for schedule(auto)
34153409 for(int j=0; j<redundantQIndecesSize; j++){
34163410 try{
@@ -3421,30 +3415,29 @@ void ZindoS::CalcKRDagerGammaRInvMatrix(double** kRDagerGammaRInv,
34213415 } // end of try
34223416 catch(MolDSException ex){
34233417 #pragma omp critical
3424- ex.Serialize(ompErrors);
3418+ ex.Serialize(errorStream);
34253419 }
3426- } // end of loop-j parallelized with openMP
3427- // Exception throwing for omp-region
3428- if(!ompErrors.str().empty()){
3429- throw MolDSException::Deserialize(ompErrors);
34303420 }
3431- } // // end of if(mpiRnak == calcRank)
3432- // Send data to head rank
3433- int tag = i;
3434- int source = calcRank;
3435- int dest = mpiHeadRank;
3436- int num = redundantQIndecesSize;
3437- double* buff = &kRDagerGammaRInv[i][0];
3438- if(mpiRank == mpiHeadRank && mpiRank != calcRank){
3439- asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
34403421 }
3441- if(mpiRank != mpiHeadRank && mpiRank == calcRank){
3442- asyncCommunicator.SetSentMessage(buff, num, dest, tag);
3422+ if(errorStream.str().empty()){
3423+ int tag = i;
3424+ int source = calcRank;
3425+ int dest = mpiHeadRank;
3426+ int num = redundantQIndecesSize;
3427+ double* buff = &kRDagerGammaRInv[i][0];
3428+ if(mpiRank == mpiHeadRank && mpiRank != calcRank){
3429+ asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
3430+ }
3431+ if(mpiRank != mpiHeadRank && mpiRank == calcRank){
3432+ asyncCommunicator.SetSentMessage(buff, num, dest, tag);
3433+ }
34433434 }
3444- } // end of loop-i parallelized with MPI
3435+ }
34453436 asyncCommunicator.Finalize();
34463437 communicationThread.join();
3447- // broadcast data to all rank
3438+ if(!errorStream.str().empty()){
3439+ throw MolDSException::Deserialize(errorStream);
3440+ }
34483441 for(int i=0; i<nonRedundantQIndecesSize; i++){
34493442 int num = redundantQIndecesSize;
34503443 double* buff = &kRDagerGammaRInv[i][0];