ComputeMgr Class Reference

#include <ComputeMgr.h>

List of all members.

Public Member Functions

 ComputeMgr ()
 ~ComputeMgr ()
void createComputes (ComputeMap *map)
void updateComputes (int, CkGroupID)
void updateComputes2 (CkQdMsg *)
void updateComputes3 ()
void splitComputes ()
void splitComputes2 (CkQdMsg *)
void updateLocalComputes ()
void updateLocalComputes2 (CkQdMsg *)
void updateLocalComputes3 ()
void updateLocalComputes4 (CkQdMsg *)
void updateLocalComputes5 ()
void doneUpdateLocalComputes ()
void sendComputeGlobalConfig (ComputeGlobalConfigMsg *)
void recvComputeGlobalConfig (ComputeGlobalConfigMsg *)
void sendComputeGlobalData (ComputeGlobalDataMsg *)
void recvComputeGlobalData (ComputeGlobalDataMsg *)
void sendComputeGlobalResults (ComputeGlobalResultsMsg *)
void recvComputeGlobalResults (ComputeGlobalResultsMsg *)
void enableComputeGlobalResults ()
void sendComputeDPMEData (ComputeDPMEDataMsg *)
void recvComputeDPMEData (ComputeDPMEDataMsg *)
void sendComputeDPMEResults (ComputeDPMEResultsMsg *, int)
void recvComputeDPMEResults (ComputeDPMEResultsMsg *)
void sendComputeEwaldData (ComputeEwaldMsg *)
void recvComputeEwaldData (ComputeEwaldMsg *)
void sendComputeEwaldResults (ComputeEwaldMsg *)
void recvComputeEwaldResults (ComputeEwaldMsg *)
void recvComputeConsForceMsg (ComputeConsForceMsg *)
void sendYieldDevice (int pe)
void recvYieldDevice (int pe)
void sendBuildCudaExclusions ()
void recvBuildCudaExclusions ()
void sendBuildCudaForceTable ()
void recvBuildCudaForceTable ()
void sendBuildMICForceTable ()
void recvBuildMICForceTable ()
void sendCreateNonbondedCUDASlave (int, int)
void recvCreateNonbondedCUDASlave (NonbondedCUDASlaveMsg *)
void sendNonbondedCUDASlaveReady (int, int, int, int)
void recvNonbondedCUDASlaveReady (int, int, int)
void sendNonbondedCUDASlaveSkip (ComputeNonbondedCUDA *c, int)
void recvNonbondedCUDASlaveSkip (NonbondedCUDASkipMsg *)
void sendNonbondedCUDASlaveEnqueue (ComputeNonbondedCUDA *c, int, int, int, int)
void sendNonbondedCUDASlaveEnqueuePatch (ComputeNonbondedCUDA *c, int, int, int, int, FinishWorkMsg *)
void sendAssignPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvAssignPatchesOnPe (CudaComputeNonbondedMsg *msg)
void sendSkipPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvSkipPatchesOnPe (CudaComputeNonbondedMsg *msg)
void sendFinishPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvFinishPatchesOnPe (CudaComputeNonbondedMsg *msg)
void sendFinishPatchOnPe (int pe, CudaComputeNonbonded *c, int i, PatchID patchID)
void recvFinishPatchOnPe (CudaComputeNonbondedMsg *msg)
void sendOpenBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvOpenBoxesOnPe (CudaComputeNonbondedMsg *msg)
void sendFinishReductions (int pe, CudaComputeNonbonded *c)
void recvFinishReductions (CudaComputeNonbondedMsg *msg)
void sendMessageEnqueueWork (int pe, CudaComputeNonbonded *c)
void recvMessageEnqueueWork (CudaComputeNonbondedMsg *msg)
void sendLaunchWork (int pe, CudaComputeNonbonded *c)
void recvLaunchWork (CudaComputeNonbondedMsg *msg)
void sendUnregisterBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvUnregisterBoxesOnPe (CudaComputeNonbondedMsg *msg)
void sendCreateNonbondedMICSlave (int, int)
void recvCreateNonbondedMICSlave (NonbondedMICSlaveMsg *)
void sendNonbondedMICSlaveReady (int, int, int, int)
void recvNonbondedMICSlaveReady (int, int, int)
void sendNonbondedMICSlaveSkip (ComputeNonbondedMIC *c, int)
void recvNonbondedMICSlaveSkip (NonbondedMICSkipMsg *)
void sendNonbondedMICSlaveEnqueue (ComputeNonbondedMIC *c, int, int, int, int)
void sendMICPEData (int, int)
void recvMICPEData (int, int)
int isMICProcessor (int)

Public Attributes

ComputeGlobal * computeGlobalObject
ResizeArray< ComputeGlobalResultsMsg * > computeGlobalResultsMsgs
int computeGlobalResultsMsgSeq
int computeGlobalResultsMsgMasterSeq

Detailed Description

Definition at line 57 of file ComputeMgr.h.


Constructor & Destructor Documentation

ComputeMgr::ComputeMgr (  ) 

Definition at line 109 of file ComputeMgr.C.

References computeGlobalObject, computeGlobalResultsMsgMasterSeq, computeGlobalResultsMsgSeq, and NAMD_die().

00110 {
00111     CkpvAccess(BOCclass_group).computeMgr = thisgroup;
00112     computeGlobalObject = 0;
00113     computeGlobalResultsMsgSeq = -1;
00114     computeGlobalResultsMsgMasterSeq = -1;
00115     computeDPMEObject = 0;
00116     computeEwaldObject = 0;
00117     computeNonbondedCUDAObject = 0;
00118     computeNonbondedMICObject = 0;
00119     computeNonbondedWorkArrays = new ComputeNonbondedWorkArrays;
00120     skipSplitting = 0;
00121     masterServerObject = NULL;
00122 
00123     #if defined(NAMD_MIC)
00124       // Create the micPEData flag array (1 bit per PE) and initially set each PE as "not driving
00125       //   a MIC card" (unset).  PEs that are driving MIC card will identify themselves during startup.
00126       int numPEs = CkNumPes();
00127       int numInts = ((numPEs + (sizeof(int)*8-1)) & (~(sizeof(int)*8-1))) / (sizeof(int)*8);  // Round up to sizeof(int) then divide by the size of an int
00128       micPEData = new int[numInts];
00129       if (micPEData == NULL) { NAMD_die("Unable to allocate memory for micPEData"); }
00130       memset(micPEData, 0, sizeof(int) * numInts);
00131     #else
00132       micPEData = NULL;
00133     #endif
00134 }

ComputeMgr::~ComputeMgr ( void   ) 

Definition at line 136 of file ComputeMgr.C.

00137 {
00138     delete computeNonbondedWorkArrays;
00139     if (masterServerObject != NULL) delete masterServerObject;
00140 }


Member Function Documentation

void ComputeMgr::createComputes ( ComputeMap *  map  ) 

Definition at line 1009 of file ComputeMgr.C.

References GlobalMasterServer::addClient(), ComputeNonbondedMIC::assignPatches(), ComputeNonbondedCUDA::assignPatches(), CudaComputeNonbonded::assignPatches(), SimParameters::bondedCUDA, SimParameters::colvarsOn, computeAnglesType, computeBondsType, computeCrosstermsType, computeDihedralsType, computeExclsType, computeImpropersType, computeNonbondedCUDA2Type, computeNonbondedCUDAType, computeNonbondedMICType, computeNonbondedPairType, computeNonbondedSelfType, computeSelfAnglesType, computeSelfBondsType, computeSelfCrosstermsType, computeSelfDihedralsType, computeSelfExclsType, computeSelfImpropersType, DebugM, DeviceCUDA::device_shared_with_pe(), deviceCUDA, SimParameters::firstTimestep, SimParameters::freeEnergyOn, getCudaComputeNonbonded(), DeviceCUDA::getMasterPe(), SimParameters::globalForcesOn, SimParameters::IMDignore, SimParameters::IMDignoreForces, SimParameters::IMDon, CudaComputeNonbonded::initialize(), j, mic_device_pe(), mic_device_shared_with_pe(), SimParameters::miscForcesOn, Node::molecule, Node::myid(), Molecule::numAtoms, PatchMap::Object(), Node::Object(), Node::simParameters, SimParameters::SMDDir, SimParameters::SMDFile, SimParameters::SMDk, SimParameters::SMDk2, SimParameters::SMDOn, SimParameters::SMDOutputFreq, SimParameters::SMDVel, SimParameters::symmetryLastStep, SimParameters::symmetryOn, SimParameters::tclForcesOn, SimParameters::TMDOn, ComputeMap::type(), and SimParameters::useCUDA2.

Referenced by Node::startup().

01010 {
01011 // #ifdef NAMD_CUDA
01012 //     int ComputePmeCUDACounter = 0;
01013 // #endif
01014     Node *node = Node::Object();
01015     SimParameters *simParams = node->simParameters;
01016     int myNode = node->myid();
01017 
01018     if ( simParams->globalForcesOn && !myNode )
01019     {
01020         DebugM(4,"Mgr running on Node "<<CkMyPe()<<"\n");
01021         /* create a master server to allow multiple masters */
01022         masterServerObject = new GlobalMasterServer(this,
01023                 PatchMap::Object()->numNodesWithPatches());
01024 
01025         /* create the individual global masters */
01026         // masterServerObject->addClient(new GlobalMasterTest());
01027         if (simParams->tclForcesOn)
01028             masterServerObject->addClient(new GlobalMasterTcl());
01029         if (simParams->IMDon && ! (simParams->IMDignore || simParams->IMDignoreForces) )
01030             masterServerObject->addClient(new GlobalMasterIMD());
01031 
01032         if (simParams->SMDOn)
01033             masterServerObject->addClient(
01034                 new GlobalMasterSMD(simParams->SMDk, simParams->SMDk2,
01035                                     simParams->SMDVel,
01036                                     simParams->SMDDir, simParams->SMDOutputFreq,
01037                                     simParams->firstTimestep, simParams->SMDFile,
01038                                     node->molecule->numAtoms)
01039             );
01040             
01041         if (simParams->symmetryOn && 
01042           (simParams->firstTimestep < simParams->symmetryLastStep || 
01043           simParams->symmetryLastStep == -1))
01044             masterServerObject->addClient(new GlobalMasterSymmetry());    
01045         if (simParams->TMDOn)
01046             masterServerObject->addClient(new GlobalMasterTMD());
01047         if (simParams->miscForcesOn)
01048             masterServerObject->addClient(new GlobalMasterMisc());
01049         if ( simParams->freeEnergyOn )
01050             masterServerObject->addClient(new GlobalMasterFreeEnergy());
01051                 if ( simParams->colvarsOn )
01052                         masterServerObject->addClient(new GlobalMasterColvars());
01053 
01054     }
01055 
01056     if ( !myNode && simParams->IMDon && (simParams->IMDignore || simParams->IMDignoreForces) ) {
01057       // GlobalMasterIMD constructor saves pointer to node->IMDOutput object
01058       new GlobalMasterIMD();
01059     }
01060 
01061 #ifdef NAMD_CUDA
01062     bool deviceIsMine = ( deviceCUDA->getMasterPe() == CkMyPe() );
01063 #ifdef BONDED_CUDA
01064     // Place bonded forces on Pe different from non-bonded forces
01065     int bondedMasterPe = deviceCUDA->getMasterPe();
01066     // for (int i=0;i < deviceCUDA->getNumPesSharingDevice();i++) {
01067     //   int pe = deviceCUDA->getPesSharingDevice(i);
01068     //   if (pe != deviceCUDA->getMasterPe()) {
01069     //     bondedMasterPe = pe;
01070     //   }
01071     // }
01072     bool deviceIsMineBonded = (CkMyPe() == bondedMasterPe);
01073 #endif
01074 #endif
01075 
01076     #ifdef NAMD_MIC
01077       bool deviceIsMine = ( mic_device_pe() == CkMyPe() );
01078     #endif
01079 
01080     for (int i=0; i < map->nComputes; i++)
01081     {
01082         if ( ! ( i % 100 ) )
01083         {
01084         }
01085 
01086 #if defined(NAMD_CUDA) || defined(NAMD_MIC)
01087         switch ( map->type(i) )
01088         {
01089 #ifdef NAMD_CUDA
01090           // case computePmeCUDAType:
01091           //   // Only create single ComputePmeCUDA object per Pe
01092           //  if ( map->computeData[i].node != myNode ) continue;
01093           //  if (ComputePmeCUDACounter > 0) continue;
01094           //  ComputePmeCUDACounter++;
01095           //  break;
01096           case computeNonbondedSelfType:
01097             if ( ! deviceIsMine ) continue;
01098             if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01099           break;
01100 
01101           case computeNonbondedPairType:
01102             if ( ! deviceIsMine ) continue;
01103             if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01104           break;
01105 
01106 #ifdef BONDED_CUDA
01107           case computeSelfBondsType:
01108           case computeBondsType:
01109             if (simParams->bondedCUDA & 1) {
01110               if ( ! deviceIsMineBonded ) continue;
01111               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01112             } else {
01113               if ( map->computeData[i].node != myNode ) continue;
01114             }
01115           break;
01116 
01117           case computeSelfAnglesType:
01118           case computeAnglesType:
01119             if (simParams->bondedCUDA & 2) {
01120               if ( ! deviceIsMineBonded ) continue;
01121               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01122             } else {
01123               if ( map->computeData[i].node != myNode ) continue;
01124             }
01125           break;
01126 
01127           case computeSelfDihedralsType:
01128           case computeDihedralsType:
01129             if (simParams->bondedCUDA & 4) {
01130               if ( ! deviceIsMineBonded ) continue;
01131               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01132             } else {
01133               if ( map->computeData[i].node != myNode ) continue;
01134             }
01135           break;
01136 
01137           case computeSelfImpropersType:
01138           case computeImpropersType:
01139             if (simParams->bondedCUDA & 8) {
01140               if ( ! deviceIsMineBonded ) continue;
01141               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01142             } else {
01143               if ( map->computeData[i].node != myNode ) continue;
01144             }
01145           break;
01146 
01147           case computeSelfExclsType:
01148           case computeExclsType:
01149             if (simParams->bondedCUDA & 16) {
01150               if ( ! deviceIsMineBonded ) continue;
01151               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01152             } else {
01153               if ( map->computeData[i].node != myNode ) continue;
01154             }
01155           break;
01156 
01157           case computeSelfCrosstermsType:
01158           case computeCrosstermsType:
01159             if (simParams->bondedCUDA & 32) {
01160               if ( ! deviceIsMineBonded ) continue;
01161               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01162             } else {
01163               if ( map->computeData[i].node != myNode ) continue;
01164             }
01165           break;
01166 
01167           case computeBondedCUDAType:
01168             if ( ! deviceIsMineBonded ) continue;
01169             if ( map->computeData[i].node != myNode ) continue;
01170           break;
01171 #endif
01172 
01173 #endif
01174 #ifdef NAMD_MIC
01175 
01176           case computeNonbondedSelfType:
01177             if (map->directToDevice(i) != 0) { // If should be directed to the device...
01178               if ( ! deviceIsMine ) continue;
01179               if ( ! mic_device_shared_with_pe(map->computeData[i].node) ) continue;
01180             } else { // ... otherwise, direct to host...
01181               if (map->computeData[i].node != myNode) { continue; }
01182             }
01183             break;
01184 
01185           case computeNonbondedPairType:
01186             if (map->directToDevice(i)) { // If should be directed to the device...
01187               if ( ! deviceIsMine ) continue;
01188               if ( ! mic_device_shared_with_pe(map->computeData[i].node) ) continue;
01189             } else { // ... otherwise, direct to host...
01190               if (map->computeData[i].node != myNode) { continue; }
01191             }
01192             break;
01193 
01194 #endif
01195           case computeNonbondedCUDAType:
01196 #ifdef NAMD_CUDA
01197           case computeNonbondedCUDA2Type:
01198 // #ifdef BONDED_CUDA
01199 //           case computeBondedCUDAType:
01200 // #endif
01201 #endif
01202           case computeNonbondedMICType:
01203             if ( ! deviceIsMine ) continue;
01204           default:
01205             if ( map->computeData[i].node != myNode ) continue;
01206         }
01207 #else // defined(NAMD_CUDA) || defined(NAMD_MIC)
01208         if ( map->computeData[i].node != myNode ) continue;
01209 #endif
01210         DebugM(1,"Compute " << i << '\n');
01211         DebugM(1,"  node = " << map->computeData[i].node << '\n');
01212         DebugM(1,"  type = " << map->computeData[i].type << '\n');
01213         DebugM(1,"  numPids = " << map->computeData[i].numPids << '\n');
01214 //         DebugM(1,"  numPidsAllocated = " << map->computeData[i].numPidsAllocated << '\n');
01215         for (int j=0; j < map->computeData[i].numPids; j++)
01216         {
01217             DebugM(1,"  pid " << map->computeData[i].pids[j].pid << '\n');
01218             if (!((j+1) % 6))
01219                 DebugM(1,'\n');
01220         }
01221         DebugM(1,"\n---------------------------------------");
01222         DebugM(1,"---------------------------------------\n");
01223 
01224         createCompute(i, map);
01225 
01226     }
01227 
01228 #ifdef NAMD_CUDA
01229     if (simParams->useCUDA2) {
01230       if (deviceIsMine) {
01231         getCudaComputeNonbonded()->assignPatches(this);
01232         getCudaComputeNonbonded()->initialize();
01233       }
01234     } else {
01235       if ( computeNonbondedCUDAObject ) {
01236         computeNonbondedCUDAObject->assignPatches();
01237       }      
01238     }
01239 #ifdef BONDED_CUDA
01240     if (simParams->bondedCUDA) {
01241       if (deviceIsMineBonded) {
01242         getComputeBondedCUDA()->initialize();
01243       }
01244     }
01245 #endif
01246 #endif
01247 #ifdef NAMD_MIC
01248     if ( computeNonbondedMICObject ) {
01249       computeNonbondedMICObject->assignPatches();
01250     }
01251 #endif
01252 
01253 }

void ComputeMgr::doneUpdateLocalComputes (  ) 

Definition at line 348 of file ComputeMgr.C.

References DebugM.

00349 {
00350 
00351 //  if (!--updateComputesCount) {
00352     DebugM(4, "doneUpdateLocalComputes on Pe("<<CkMyPe()<<")\n");
00353     void *msg = CkAllocMsg(0,0,0);
00354     CkSendMsgBranch(updateComputesReturnEP,msg,0,updateComputesReturnChareID);
00355 //  }
00356 }

void ComputeMgr::enableComputeGlobalResults (  ) 
int ComputeMgr::isMICProcessor ( int  pe  ) 

Definition at line 1885 of file ComputeMgr.C.

01885                                      {
01886   if (pe < 0 || pe >= CkNumPes() || micPEData == NULL) { return 0; }
01887   int majorIndex = pe / (sizeof(int)*8);
01888   int minorIndex = pe % (sizeof(int)*8);
01889   return ((micPEData[majorIndex] >> minorIndex) & 0x01);
01890 }

void ComputeMgr::recvAssignPatchesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1594 of file ComputeMgr.C.

References CudaComputeNonbonded::assignPatchesOnPe(), and CudaComputeNonbondedMsg::c.

01594                                                                    {
01595   msg->c->assignPatchesOnPe();
01596   delete msg;
01597 }

void ComputeMgr::recvBuildCudaExclusions (  ) 

Definition at line 1461 of file ComputeMgr.C.

References build_cuda_exclusions().

01461                                          {
01462 #ifdef NAMD_CUDA
01463     build_cuda_exclusions();
01464 #endif
01465 }

void ComputeMgr::recvBuildCudaForceTable (  ) 

Definition at line 1480 of file ComputeMgr.C.

References build_cuda_force_table().

01480                                          {
01481 #ifdef NAMD_CUDA
01482     build_cuda_force_table();
01483 #endif
01484 }

void ComputeMgr::recvBuildMICForceTable (  ) 

Definition at line 1499 of file ComputeMgr.C.

01499                                         {
01500   #ifdef NAMD_MIC
01501     build_mic_force_table();
01502   #endif
01503 }

void ComputeMgr::recvComputeConsForceMsg ( ComputeConsForceMsg *  msg  ) 

Definition at line 1408 of file ComputeMgr.C.

References ComputeConsForceMsg::aid, Molecule::consForce, Molecule::consForceIndexes, ComputeConsForceMsg::f, for(), Node::molecule, Molecule::numAtoms, Node::Object(), and ResizeArray< Elem >::size().

01409 {
01410     Molecule *m = Node::Object()->molecule;
01411     delete [] m->consForceIndexes;
01412     delete [] m->consForce;
01413     int n = msg->aid.size();
01414     if (n > 0)
01415     {
01416         m->consForceIndexes = new int32[m->numAtoms];
01417         m->consForce = new Vector[n];
01418         int i;
01419         for (i=0; i<m->numAtoms; i++) m->consForceIndexes[i] = -1;
01420         for (i=0; i<msg->aid.size(); i++)
01421         {
01422             m->consForceIndexes[msg->aid[i]] = i;
01423             m->consForce[i] = msg->f[i];
01424         }
01425     }
01426     else
01427     {
01428         m->consForceIndexes = NULL;
01429         m->consForce = NULL;
01430     }
01431     delete msg;
01432 }

void ComputeMgr::recvComputeDPMEData ( ComputeDPMEDataMsg *  msg  ) 

Definition at line 1378 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

01379 {
01380     if ( computeDPMEObject )
01381     {
01382 #ifdef DPME
01383         computeDPMEObject->recvData(msg);
01384 #endif
01385     }
01386     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01387     else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
01388 }

void ComputeMgr::recvComputeDPMEResults ( ComputeDPMEResultsMsg *  msg  ) 

Definition at line 1396 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

01397 {
01398     if ( computeDPMEObject )
01399     {
01400 #ifdef DPME
01401         computeDPMEObject->recvResults(msg);
01402 #endif
01403     }
01404     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01405     else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
01406 }

void ComputeMgr::recvComputeEwaldData ( ComputeEwaldMsg *  msg  ) 

Definition at line 1341 of file ComputeMgr.C.

References NAMD_die(), and ComputeEwald::recvData().

01342 {
01343     if (computeEwaldObject)
01344         computeEwaldObject->recvData(msg);
01345     else NAMD_die("ComputeMgr::computeEwaldObject in recvData is NULL!");
01346 }

void ComputeMgr::recvComputeEwaldResults ( ComputeEwaldMsg *  msg  ) 

Definition at line 1353 of file ComputeMgr.C.

References NAMD_die(), PatchMap::Object(), and ComputeEwald::recvResults().

Referenced by sendComputeEwaldResults().

01354 {
01355     if (computeEwaldObject) {
01356         CmiEnableUrgentSend(1);
01357         computeEwaldObject->recvResults(msg);
01358         CmiEnableUrgentSend(0);
01359     }
01360     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01361     else NAMD_die("ComputeMgr::computeEwaldObject in recvResults is NULL!");
01362 }

void ComputeMgr::recvComputeGlobalConfig ( ComputeGlobalConfigMsg *   ) 
void ComputeMgr::recvComputeGlobalData ( ComputeGlobalDataMsg *  msg  ) 

Definition at line 1278 of file ComputeMgr.C.

References NAMD_die(), and GlobalMasterServer::recvData().

01279 {
01280     if (masterServerObject)  // make sure it has been initialized
01281     {
01282         masterServerObject->recvData(msg);
01283     }
01284     else NAMD_die("ComputeMgr::masterServerObject is NULL!");
01285 }

void ComputeMgr::recvComputeGlobalResults ( ComputeGlobalResultsMsg *  msg  ) 

Definition at line 1306 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), computeGlobalObject, computeGlobalResultsMsgs, computeGlobalResultsMsgSeq, NAMD_die(), PatchMap::Object(), ComputeGlobal::recvResults(), and ComputeGlobalResultsMsg::seq.

Referenced by enableComputeGlobalResults().

01307 {
01308     if ( computeGlobalObject )
01309     {
01310       if ( msg->seq == computeGlobalResultsMsgSeq ) {
01311         CmiEnableUrgentSend(1);
01312         computeGlobalObject->recvResults(msg);
01313         CmiEnableUrgentSend(0);
01314       } else {
01315         computeGlobalResultsMsgs.add(msg);
01316       }
01317     }
01318     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01319     else NAMD_die("ComputeMgr::computeGlobalObject is NULL!");
01320 }

void ComputeMgr::recvCreateNonbondedCUDASlave ( NonbondedCUDASlaveMsg *  msg  ) 

Definition at line 1519 of file ComputeMgr.C.

References Compute::cid, NonbondedCUDASlaveMsg::index, and NonbondedCUDASlaveMsg::master.

01519                                                                         {
01520 #ifdef NAMD_CUDA
01521   new ComputeNonbondedCUDA(msg->master->cid,this,msg->master,msg->index);
01522 #endif
01523 }

void ComputeMgr::recvCreateNonbondedMICSlave ( NonbondedMICSlaveMsg *  msg  ) 

Definition at line 1819 of file ComputeMgr.C.

References Compute::cid, NonbondedMICSlaveMsg::index, and NonbondedMICSlaveMsg::master.

01819                                                                       {
01820 #ifdef NAMD_MIC
01821   ComputeNonbondedMIC *c = new ComputeNonbondedMIC(msg->master->cid,this,msg->master,msg->index);
01822 #endif
01823 }

void ComputeMgr::recvFinishPatchesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1621 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishPatchesOnPe().

01621                                                                    {
01622   msg->c->finishPatchesOnPe();
01623   delete msg;
01624 }

void ComputeMgr::recvFinishPatchOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1634 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, CudaComputeNonbonded::finishPatchOnPe(), and CudaComputeNonbondedMsg::i.

01634                                                                  {
01635   msg->c->finishPatchOnPe(msg->i);
01636   delete msg;
01637 }

void ComputeMgr::recvFinishReductions ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1659 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishReductions().

01659                                                                   {
01660   msg->c->finishReductions();
01661   delete msg;
01662 }

void ComputeMgr::recvLaunchWork ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1681 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::launchWork().

01681                                                             {
01682   msg->c->launchWork();
01683   delete msg;
01684 }

void ComputeMgr::recvMessageEnqueueWork ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1670 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::messageEnqueueWork().

01670                                                                     {
01671   msg->c->messageEnqueueWork();
01672   delete msg;
01673 }

void ComputeMgr::recvMICPEData ( int  pe,
int  data 
)

Definition at line 1870 of file ComputeMgr.C.

01870                                                {
01871   if (pe < 0 || pe >= CkNumPes() || micPEData == NULL) { return; }
01872   int majorIndex = pe / (sizeof(int)*8);
01873   int minorIndex = pe % (sizeof(int)*8);
01874   if (data != 0) {
01875     micPEData[majorIndex] |= (0x01 << minorIndex);
01876   } else {
01877     micPEData[majorIndex] &= ((~0x01) << minorIndex);
01878   }
01879 }

void ComputeMgr::recvNonbondedCUDASlaveReady ( int  np,
int  ac,
int  seq 
)

Definition at line 1530 of file ComputeMgr.C.

References Compute::patchReady().

01530                                                                     {
01531   for ( int i=0; i<np; ++i ) {
01532     computeNonbondedCUDAObject->patchReady(-1,ac,seq);
01533   }
01534 }

void ComputeMgr::recvNonbondedCUDASlaveSkip ( NonbondedCUDASkipMsg *  msg  ) 

Definition at line 1547 of file ComputeMgr.C.

References NonbondedCUDASkipMsg::compute, and ComputeNonbondedCUDA::skip().

01547                                                                      {
01548 #ifdef NAMD_CUDA
01549   msg->compute->skip();
01550 #endif
01551   delete msg;
01552 }

void ComputeMgr::recvNonbondedMICSlaveReady ( int  np,
int  ac,
int  seq 
)

Definition at line 1830 of file ComputeMgr.C.

References Compute::patchReady().

01830                                                                    {
01831   for ( int i=0; i<np; ++i ) {
01832     computeNonbondedMICObject->patchReady(-1,ac,seq);
01833   }
01834 }

void ComputeMgr::recvNonbondedMICSlaveSkip ( NonbondedMICSkipMsg *  msg  ) 

Definition at line 1847 of file ComputeMgr.C.

References NonbondedMICSkipMsg::compute, and ComputeNonbondedMIC::skip().

01847                                                                    {
01848 #ifdef NAMD_MIC
01849   msg->compute->skip();
01850 #endif
01851   delete msg;
01852 }

void ComputeMgr::recvOpenBoxesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1648 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::openBoxesOnPe().

01648                                                                {
01649   msg->c->openBoxesOnPe();
01650   delete msg;
01651 }

void ComputeMgr::recvSkipPatchesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1607 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::skipPatchesOnPe().

01607                                                                  {
01608   msg->c->skipPatchesOnPe();
01609   delete msg;
01610 }

void ComputeMgr::recvUnregisterBoxesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1694 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::unregisterBoxesOnPe().

01694                                                                      {
01695   msg->c->unregisterBoxesOnPe();
01696   delete msg;
01697 }

void ComputeMgr::recvYieldDevice ( int  pe  ) 

Definition at line 1439 of file ComputeMgr.C.

References ComputeNonbondedMIC::recvYieldDevice(), and ComputeNonbondedCUDA::recvYieldDevice().

01439                                        {
01440 #ifdef NAMD_CUDA
01441     computeNonbondedCUDAObject->recvYieldDevice(pe);
01442 #endif
01443 #ifdef NAMD_MIC
01444     computeNonbondedMICObject->recvYieldDevice(pe);
01445 #endif
01446 }

void ComputeMgr::sendAssignPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1586 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::assignPatches().

01586                                                                                    {
01587   for (int i=0;i < pes.size();i++) {
01588     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01589     msg->c = c;
01590     thisProxy[pes[i]].recvAssignPatchesOnPe(msg);
01591   }
01592 }

void ComputeMgr::sendBuildCudaExclusions (  ) 

Definition at line 1448 of file ComputeMgr.C.

Referenced by Node::resendMolecule().

01448                                          {
01449     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01450     int pe = CkNodeFirst(CkMyNode());
01451     int end = pe + CkNodeSize(CkMyNode());
01452     for( ; pe != end; ++pe ) {
01453       cm[pe].recvBuildCudaExclusions();
01454     }
01455 }

void ComputeMgr::sendBuildCudaForceTable (  ) 

Definition at line 1467 of file ComputeMgr.C.

Referenced by send_build_cuda_force_table().

01467                                          {
01468     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01469     int pe = CkNodeFirst(CkMyNode());
01470     int end = pe + CkNodeSize(CkMyNode());
01471     for( ; pe != end; ++pe ) {
01472       cm[pe].recvBuildCudaForceTable();
01473     }
01474 }

void ComputeMgr::sendBuildMICForceTable (  ) 

Definition at line 1486 of file ComputeMgr.C.

01486                                         {
01487   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01488   int pe = CkNodeFirst(CkMyNode());
01489   int end = pe + CkNodeSize(CkMyNode());
01490   for( ; pe != end; ++pe ) {
01491     cm[pe].recvBuildMICForceTable();
01492   }
01493 }

void ComputeMgr::sendComputeDPMEData ( ComputeDPMEDataMsg *  msg  ) 

Definition at line 1364 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

01365 {
01366     if ( computeDPMEObject )
01367     {
01368 #ifdef DPME
01369         int node = computeDPMEObject->getMasterNode();
01370         CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01371         cm.recvComputeDPMEData(msg,node);
01372 #endif
01373     }
01374     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01375     else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
01376 }

void ComputeMgr::sendComputeDPMEResults ( ComputeDPMEResultsMsg *msg,
int  node 
)

Definition at line 1390 of file ComputeMgr.C.

01391 {
01392     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01393     cm[node].recvComputeDPMEResults(msg);
01394 }

void ComputeMgr::sendComputeEwaldData ( ComputeEwaldMsg *msg  ) 

Definition at line 1325 of file ComputeMgr.C.

References ComputeEwald::getMasterNode(), NAMD_die(), and PatchMap::Object().

Referenced by ComputeEwald::doWork().

01326 {
01327     if (computeEwaldObject)
01328     {
01329         int node = computeEwaldObject->getMasterNode();
01330         CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01331         cm[node].recvComputeEwaldData(msg);
01332     }
01333     else if (!PatchMap::Object()->numHomePatches())
01334     {
01335         CkPrintf("skipping message on Pe(%d)\n", CkMyPe());
01336         delete msg;
01337     }
01338     else NAMD_die("ComputeMgr::computeEwaldObject is NULL!");
01339 }

void ComputeMgr::sendComputeEwaldResults ( ComputeEwaldMsg *msg  ) 

Definition at line 1348 of file ComputeMgr.C.

References recvComputeEwaldResults().

Referenced by ComputeEwald::recvData().

01349 {
01350     (CProxy_ComputeMgr(CkpvAccess(BOCclass_group).computeMgr)).recvComputeEwaldResults(msg);
01351 }

void ComputeMgr::sendComputeGlobalConfig ( ComputeGlobalConfigMsg *   ) 
void ComputeMgr::sendComputeGlobalData ( ComputeGlobalDataMsg *msg  ) 

Definition at line 1272 of file ComputeMgr.C.

Referenced by ComputeGlobal::doWork().

01273 {
01274     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01275     cm[0].recvComputeGlobalData(msg);
01276 }

void ComputeMgr::sendComputeGlobalResults ( ComputeGlobalResultsMsg *msg  ) 

Definition at line 1287 of file ComputeMgr.C.

References computeGlobalResultsMsgMasterSeq, and ComputeGlobalResultsMsg::seq.

01288 {
01289     msg->seq = ++computeGlobalResultsMsgMasterSeq;
01290     thisProxy.recvComputeGlobalResults(msg);
01291 }

void ComputeMgr::sendCreateNonbondedCUDASlave ( int  pe,
int  index 
)

Definition at line 1511 of file ComputeMgr.C.

References NonbondedCUDASlaveMsg::index, and NonbondedCUDASlaveMsg::master.

Referenced by ComputeNonbondedCUDA::assignPatches().

01511                                                                {
01512   NonbondedCUDASlaveMsg *msg = new NonbondedCUDASlaveMsg;
01513   msg->master = computeNonbondedCUDAObject;
01514   msg->index = index;
01515   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01516   cm[pe].recvCreateNonbondedCUDASlave(msg);
01517 }

void ComputeMgr::sendCreateNonbondedMICSlave ( int  pe,
int  index 
)

Definition at line 1811 of file ComputeMgr.C.

References NonbondedMICSlaveMsg::index, and NonbondedMICSlaveMsg::master.

01811                                                               {
01812   NonbondedMICSlaveMsg *msg = new NonbondedMICSlaveMsg;
01813   msg->master = computeNonbondedMICObject;
01814   msg->index = index;
01815   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01816   cm[pe].recvCreateNonbondedMICSlave(msg);
01817 }

void ComputeMgr::sendFinishPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *c 
)

Definition at line 1612 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, COMPUTE_PROXY_PRIORITY, PRIORITY_SIZE, Compute::sequence(), and SET_PRIORITY.

01612                                                                                    {
01613   for (int i=0;i < pes.size();i++) {
01614     CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
01615     SET_PRIORITY(msg, c->sequence(), COMPUTE_PROXY_PRIORITY);
01616     msg->c = c;
01617     thisProxy[pes[i]].recvFinishPatchesOnPe(msg);
01618   }
01619 }

void ComputeMgr::sendFinishPatchOnPe ( int  pe,
CudaComputeNonbonded *c,
int  i,
PatchID  patchID 
)

Definition at line 1626 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, COMPUTE_PROXY_PRIORITY, CudaComputeNonbondedMsg::i, PATCH_PRIORITY, PRIORITY_SIZE, Compute::sequence(), and SET_PRIORITY.

01626                                                                                             {
01627   CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
01628   SET_PRIORITY(msg, c->sequence(), COMPUTE_PROXY_PRIORITY + PATCH_PRIORITY(patchID));
01629   msg->c = c;
01630   msg->i = i;
01631   thisProxy[pe].recvFinishPatchOnPe(msg);
01632 }

void ComputeMgr::sendFinishReductions ( int  pe,
CudaComputeNonbonded *c 
)

Definition at line 1653 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::skipPatchesOnPe().

01653                                                                      {
01654   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01655   msg->c = c;
01656   thisProxy[pe].recvFinishReductions(msg);
01657 }

void ComputeMgr::sendLaunchWork ( int  pe,
CudaComputeNonbonded *c 
)

Definition at line 1675 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::openBoxesOnPe().

01675                                                                {
01676   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01677   msg->c = c;
01678   thisProxy[pe].recvLaunchWork(msg);
01679 }

void ComputeMgr::sendMessageEnqueueWork ( int  pe,
CudaComputeNonbonded *c 
)

Definition at line 1664 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::noWork().

01664                                                                        {
01665   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01666   msg->c = c;
01667   thisProxy[pe].recvMessageEnqueueWork(msg);
01668 }

void ComputeMgr::sendMICPEData ( int  pe,
int  data 
)

Definition at line 1865 of file ComputeMgr.C.

01865                                                {
01866   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01867   cm.recvMICPEData(pe, data);
01868 }

void ComputeMgr::sendNonbondedCUDASlaveEnqueue ( ComputeNonbondedCUDA *c,
int  pe,
int  seq,
int  prio,
int  ws 
)

Definition at line 1554 of file ComputeMgr.C.

References Compute::cid, LocalWorkMsg::compute, ComputeNonbondedCUDA::localHostedPatches, Compute::localWorkMsg, ComputeNonbondedCUDA::localWorkMsg2, SET_PRIORITY, ResizeArray< Elem >::size(), and Compute::type().

Referenced by ComputeNonbondedCUDA::finishWork().

01554                                                                                                          {
01555   if ( ws == 2 && c->localHostedPatches.size() == 0 ) return;
01556   LocalWorkMsg *msg = ( ws == 1 ? c->localWorkMsg : c->localWorkMsg2 );
01557   msg->compute = c;
01558   int type = c->type();
01559   int cid = c->cid;
01560   SET_PRIORITY(msg,seq,prio);
01561   CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
01562   wdProxy[pe].enqueueCUDA(msg);
01563 }

void ComputeMgr::sendNonbondedCUDASlaveEnqueuePatch ( ComputeNonbondedCUDA *c,
int  pe,
int  seq,
int  prio,
int  data,
FinishWorkMsg *msg 
)

Definition at line 1565 of file ComputeMgr.C.

References FinishWorkMsg::compute, FinishWorkMsg::data, and SET_PRIORITY.

Referenced by ComputeNonbondedCUDA::messageFinishPatch().

01565                                                                                                                                     {
01566   msg->compute = c;
01567   msg->data = data;
01568   SET_PRIORITY(msg,seq,prio);
01569   CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
01570   wdProxy[pe].finishCUDAPatch(msg);
01571 }

void ComputeMgr::sendNonbondedCUDASlaveReady ( int  pe,
int  np,
int  ac,
int  seq 
)

Definition at line 1525 of file ComputeMgr.C.

Referenced by ComputeNonbondedCUDA::noWork().

01525                                                                             {
01526   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01527   cm[pe].recvNonbondedCUDASlaveReady(np,ac,seq);
01528 }

void ComputeMgr::sendNonbondedCUDASlaveSkip ( ComputeNonbondedCUDA *c,
int  pe 
)

Definition at line 1541 of file ComputeMgr.C.

References NonbondedCUDASkipMsg::compute.

Referenced by ComputeNonbondedCUDA::noWork().

01541                                                                            {
01542   NonbondedCUDASkipMsg *msg = new NonbondedCUDASkipMsg;
01543   msg->compute = c;
01544   thisProxy[pe].recvNonbondedCUDASlaveSkip(msg);
01545 }

void ComputeMgr::sendNonbondedMICSlaveEnqueue ( ComputeNonbondedMIC *c,
int  pe,
int  seq,
int  prio,
int  ws 
)

Definition at line 1854 of file ComputeMgr.C.

References Compute::cid, LocalWorkMsg::compute, ComputeNonbondedMIC::localHostedPatches, Compute::localWorkMsg, ComputeNonbondedMIC::localWorkMsg2, SET_PRIORITY, ResizeArray< Elem >::size(), and Compute::type().

01854                                                                                                        {
01855   if ( ws == 2 && c->localHostedPatches.size() == 0 ) return;
01856   LocalWorkMsg *msg = ( ws == 1 ? c->localWorkMsg : c->localWorkMsg2 );
01857   msg->compute = c;
01858   int type = c->type();
01859   int cid = c->cid;
01860   SET_PRIORITY(msg,seq,prio);
01861   CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
01862   wdProxy[pe].enqueueMIC(msg);
01863 }

void ComputeMgr::sendNonbondedMICSlaveReady ( int  pe,
int  np,
int  ac,
int  seq 
)

Definition at line 1825 of file ComputeMgr.C.

01825                                                                            {
01826   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01827   cm[pe].recvNonbondedMICSlaveReady(np,ac,seq);
01828 }

void ComputeMgr::sendNonbondedMICSlaveSkip ( ComputeNonbondedMIC *c,
int  pe 
)

Definition at line 1841 of file ComputeMgr.C.

References NonbondedMICSkipMsg::compute.

01841                                                                          {
01842   NonbondedMICSkipMsg *msg = new NonbondedMICSkipMsg;
01843   msg->compute = c;
01844   thisProxy[pe].recvNonbondedMICSlaveSkip(msg);
01845 }

void ComputeMgr::sendOpenBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *c 
)

Definition at line 1639 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, PRIORITY_SIZE, PROXY_DATA_PRIORITY, Compute::sequence(), and SET_PRIORITY.

Referenced by CudaComputeNonbonded::doWork().

01639                                                                                {
01640   for (int i=0;i < pes.size();i++) {
01641     CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
01642     SET_PRIORITY(msg, c->sequence(), PROXY_DATA_PRIORITY+1); // after bonded
01643     msg->c = c;
01644     thisProxy[pes[i]].recvOpenBoxesOnPe(msg);
01645   }
01646 }

void ComputeMgr::sendSkipPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *c 
)

Definition at line 1599 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

01599                                                                                  {
01600   for (int i=0;i < pes.size();i++) {
01601     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01602     msg->c = c;
01603     thisProxy[pes[i]].recvSkipPatchesOnPe(msg);
01604   }
01605 }

void ComputeMgr::sendUnregisterBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *c 
)

Definition at line 1686 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::~CudaComputeNonbonded().

01686                                                                                      {
01687   for (int i=0;i < pes.size();i++) {
01688     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01689     msg->c = c;
01690     thisProxy[pes[i]].recvUnregisterBoxesOnPe(msg);
01691   }
01692 }

void ComputeMgr::sendYieldDevice ( int  pe  ) 

Definition at line 1434 of file ComputeMgr.C.

Referenced by cuda_check_local_calc(), and cuda_check_remote_calc().

01434                                        {
01435     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01436     cm[pe].recvYieldDevice(CkMyPe());
01437 }

void ComputeMgr::splitComputes (  ) 

Definition at line 175 of file ComputeMgr.C.

References ComputeMap::cloneCompute(), ComputeMap::extendPtrs(), j, ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPartitions(), ComputeMap::Object(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), and ComputeMap::setNumPartitions().

00176 {
00177   if ( ! CkMyRank() ) {
00178     ComputeMap *computeMap = ComputeMap::Object();
00179     const int nc = computeMap->numComputes();
00180 
00181     for (int i=0; i<nc; i++) {
00182       int nnp = computeMap->newNumPartitions(i);
00183       if ( nnp > 0 ) {
00184         if ( computeMap->numPartitions(i) != 1 ) {
00185           CkPrintf("Warning: unable to partition compute %d\n", i);
00186           computeMap->setNewNumPartitions(i,0);
00187           continue;
00188         }
00189         //CkPrintf("splitting compute %d by %d\n",i,nnp);
00190         computeMap->setNumPartitions(i,nnp);
00191         if (computeMap->newNode(i) == -1) {
00192           computeMap->setNewNode(i,computeMap->node(i));
00193         }
00194         for ( int j=1; j<nnp; ++j ) {
00195           int newcid = computeMap->cloneCompute(i,j);
00196           //CkPrintf("compute %d partition %d is %d\n",i,j,newcid);
00197         }
00198       }
00199     }
00200     computeMap->extendPtrs();
00201   }
00202 
00203   if (!CkMyPe())
00204   {
00205     CkStartQD(CkIndex_ComputeMgr::splitComputes2((CkQdMsg*)0), &thishandle);
00206   }
00207 }

void ComputeMgr::splitComputes2 ( CkQdMsg *  msg  ) 

Definition at line 209 of file ComputeMgr.C.

00210 {
00211     delete msg;
00212     CProxy_ComputeMgr(thisgroup).updateLocalComputes();
00213 }

void ComputeMgr::updateComputes ( int  ep,
CkGroupID  chareID 
)

Definition at line 142 of file ComputeMgr.C.

References NAMD_bug().

Referenced by LdbCoordinator::ExecuteMigrations().

00143 {
00144     updateComputesReturnEP = ep;
00145     updateComputesReturnChareID = chareID;
00146     updateComputesCount = CkNumPes();
00147 
00148     if (CkMyPe())
00149     {
00150         NAMD_bug("updateComputes signaled on wrong Pe!");
00151     }
00152 
00153     CkStartQD(CkIndex_ComputeMgr::updateComputes2((CkQdMsg*)0),&thishandle);
00154 }

void ComputeMgr::updateComputes2 ( CkQdMsg *  msg  ) 

Definition at line 156 of file ComputeMgr.C.

References WorkDistrib::saveComputeMapChanges().

00157 {
00158     delete msg;
00159 
00160     CProxy_WorkDistrib wd(CkpvAccess(BOCclass_group).workDistrib);
00161     WorkDistrib  *workDistrib = wd.ckLocalBranch();
00162     workDistrib->saveComputeMapChanges(CkIndex_ComputeMgr::updateComputes3(),thisgroup);
00163 }

void ComputeMgr::updateComputes3 (  ) 

Definition at line 165 of file ComputeMgr.C.

00166 {
00167     if ( skipSplitting ) {
00168       CProxy_ComputeMgr(thisgroup).updateLocalComputes();
00169     } else {
00170       CProxy_ComputeMgr(thisgroup).splitComputes();
00171       skipSplitting = 1;
00172     }
00173 }

void ComputeMgr::updateLocalComputes (  ) 

Definition at line 215 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), ComputeMap::compute(), ProxyMgr::createProxy(), Compute::ldObjHandle, LdbCoordinator::Migrate(), ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPids(), LdbCoordinator::Object(), ComputeMap::Object(), ComputeMap::pid(), ComputeMap::registerCompute(), and ResizeArray< Elem >::resize().

00216 {
00217     ComputeMap *computeMap = ComputeMap::Object();
00218     CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
00219     ProxyMgr *proxyMgr = pm.ckLocalBranch();
00220     LdbCoordinator *ldbCoordinator = LdbCoordinator::Object();
00221 
00222      computeFlag.resize(0);
00223 
00224     const int nc = computeMap->numComputes();
00225     for (int i=0; i<nc; i++) {
00226 
00227         if ( computeMap->node(i) == CkMyPe() &&
00228              computeMap->newNumPartitions(i) > 1 ) {
00229            Compute *c = computeMap->compute(i);
00230            ldbCoordinator->Migrate(c->ldObjHandle,CkMyPe());
00231            delete c;
00232            computeMap->registerCompute(i,NULL);
00233            if ( computeMap->newNode(i) == CkMyPe() ) computeFlag.add(i); 
00234         } else
00235         if (computeMap->newNode(i) == CkMyPe() && computeMap->node(i) != CkMyPe())
00236         {
00237             computeFlag.add(i);
00238             for (int n=0; n < computeMap->numPids(i); n++)
00239             {
00240                 proxyMgr->createProxy(computeMap->pid(i,n));
00241             }
00242         }
00243         else if (computeMap->node(i) == CkMyPe() &&
00244                  (computeMap->newNode(i) != -1 && computeMap->newNode(i) != CkMyPe() ))
00245         {
00246             // CkPrintf("delete compute %d on pe %d\n",i,CkMyPe());
00247             delete computeMap->compute(i);
00248             computeMap->registerCompute(i,NULL);
00249         }
00250     }
00251 
00252     if (!CkMyPe())
00253     {
00254         CkStartQD(CkIndex_ComputeMgr::updateLocalComputes2((CkQdMsg*)0), &thishandle);
00255     }
00256 }

void ComputeMgr::updateLocalComputes2 ( CkQdMsg *  msg  ) 

Definition at line 259 of file ComputeMgr.C.

00260 {
00261     delete msg;
00262     CProxy_ComputeMgr(thisgroup).updateLocalComputes3();
00263 }

void ComputeMgr::updateLocalComputes3 (  ) 

Definition at line 266 of file ComputeMgr.C.

References ResizeArray< Elem >::clear(), ComputeMap::newNode(), ProxyMgr::nodecount, ComputeMap::numComputes(), ComputeMap::Object(), ProxyMgr::removeUnusedProxies(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), ComputeMap::setNode(), and ResizeArray< Elem >::size().

00267 {
00268     ComputeMap *computeMap = ComputeMap::Object();
00269     CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
00270     ProxyMgr *proxyMgr = pm.ckLocalBranch();
00271 
00272     ProxyMgr::nodecount = 0;
00273 
00274     const int nc = computeMap->numComputes();
00275 
00276     if ( ! CkMyRank() ) {
00277       for (int i=0; i<nc; i++) {
00278         computeMap->setNewNumPartitions(i,0);
00279         if (computeMap->newNode(i) != -1) {
00280           computeMap->setNode(i,computeMap->newNode(i));
00281           computeMap->setNewNode(i,-1);
00282         }
00283       }
00284     }
00285  
00286     for(int i=0; i<computeFlag.size(); i++) createCompute(computeFlag[i], computeMap);
00287     computeFlag.clear();
00288 
00289     proxyMgr->removeUnusedProxies();
00290 
00291     if (!CkMyPe())
00292     {
00293         CkStartQD(CkIndex_ComputeMgr::updateLocalComputes4((CkQdMsg*)0), &thishandle);
00294     }
00295 }

void ComputeMgr::updateLocalComputes4 ( CkQdMsg *  msg  ) 

Definition at line 298 of file ComputeMgr.C.

References SimParameters::computeMapFilename, ComputeMap::Object(), Node::Object(), ComputeMap::saveComputeMap(), Node::simParameters, simParams, and SimParameters::storeComputeMap.

00299 {
00300     delete msg;
00301     CProxy_ComputeMgr(thisgroup).updateLocalComputes5();
00302 
00303     // store the latest compute map
00304            SimParameters *simParams = Node::Object()->simParameters;
00305     if (simParams->storeComputeMap) {
00306       ComputeMap *computeMap = ComputeMap::Object();
00307       computeMap->saveComputeMap(simParams->computeMapFilename);
00308     }
00309 }

void ComputeMgr::updateLocalComputes5 (  ) 

Definition at line 316 of file ComputeMgr.C.

References ProxyMgr::buildProxySpanningTree2(), PatchMap::checkMap(), ComputeMap::checkMap(), ProxyMgr::Object(), PatchMap::Object(), ComputeMap::Object(), proxyRecvSpanning, proxySendSpanning, and ProxyMgr::sendSpanningTrees().

00317 {
00318     if ( ! CkMyRank() ) {
00319       ComputeMap::Object()->checkMap();
00320       PatchMap::Object()->checkMap();
00321     }
00322 
00323     // we always use the centralized building of spanning tree
00324     // distributed building of ST called in Node.C only
00325     if (proxySendSpanning || proxyRecvSpanning)
00326         ProxyMgr::Object()->buildProxySpanningTree2();
00327 
00328     // this code needs to be turned on if we want to
00329     // shift the creation of ST to the load balancer
00330 
00331 #if 0
00332     if (proxySendSpanning || proxyRecvSpanning)
00333     {
00334         if (firstphase)
00335             ProxyMgr::Object()->buildProxySpanningTree2();
00336         else
00337             if (CkMyPe() == 0)
00338                 ProxyMgr::Object()->sendSpanningTrees();
00339 
00340         firstphase = 0;
00341     }
00342 #endif
00343 
00344     if (!CkMyPe())
00345         CkStartQD(CkIndex_ComputeMgr::doneUpdateLocalComputes(), &thishandle);
00346 }


Member Data Documentation

computeGlobalResultsMsgMasterSeq

Definition at line 100 of file ComputeMgr.h.

Referenced by ComputeMgr(), and sendComputeGlobalResults().

Definition at line 98 of file ComputeMgr.h.

Referenced by enableComputeGlobalResults(), and recvComputeGlobalResults().

Definition at line 99 of file ComputeMgr.h.

Referenced by ComputeMgr(), enableComputeGlobalResults(), and recvComputeGlobalResults().


The documentation for this class was generated from the following files:

Generated on 12 Jul 2020 for NAMD by  doxygen 1.6.1