ComputeMgr Class Reference

#include <ComputeMgr.h>

List of all members.

Public Member Functions

 ComputeMgr ()
 ~ComputeMgr ()
void createComputes (ComputeMap *map)
void updateComputes (int, CkGroupID)
void updateComputes2 (CkQdMsg *)
void updateComputes3 ()
void splitComputes ()
void splitComputes2 (CkQdMsg *)
void updateLocalComputes ()
void updateLocalComputes2 (CkQdMsg *)
void updateLocalComputes3 ()
void updateLocalComputes4 (CkQdMsg *)
void updateLocalComputes5 ()
void doneUpdateLocalComputes ()
void sendComputeGlobalConfig (ComputeGlobalConfigMsg *)
void recvComputeGlobalConfig (ComputeGlobalConfigMsg *)
void sendComputeGlobalData (ComputeGlobalDataMsg *)
void recvComputeGlobalData (ComputeGlobalDataMsg *)
void sendComputeGlobalResults (ComputeGlobalResultsMsg *)
void recvComputeGlobalResults (ComputeGlobalResultsMsg *)
void enableComputeGlobalResults ()
void sendComputeDPMEData (ComputeDPMEDataMsg *)
void recvComputeDPMEData (ComputeDPMEDataMsg *)
void sendComputeDPMEResults (ComputeDPMEResultsMsg *, int)
void recvComputeDPMEResults (ComputeDPMEResultsMsg *)
void sendComputeEwaldData (ComputeEwaldMsg *)
void recvComputeEwaldData (ComputeEwaldMsg *)
void sendComputeEwaldResults (ComputeEwaldMsg *)
void recvComputeEwaldResults (ComputeEwaldMsg *)
void recvComputeConsForceMsg (ComputeConsForceMsg *)
void sendYieldDevice (int pe)
void recvYieldDevice (int pe)
void sendBuildCudaExclusions ()
void recvBuildCudaExclusions ()
void sendBuildCudaForceTable ()
void recvBuildCudaForceTable ()
void sendBuildMICForceTable ()
void recvBuildMICForceTable ()
void sendCreateNonbondedCUDASlave (int, int)
void recvCreateNonbondedCUDASlave (NonbondedCUDASlaveMsg *)
void sendNonbondedCUDASlaveReady (int, int, int, int)
void recvNonbondedCUDASlaveReady (int, int, int)
void sendNonbondedCUDASlaveSkip (ComputeNonbondedCUDA *c, int)
void recvNonbondedCUDASlaveSkip (NonbondedCUDASkipMsg *)
void sendNonbondedCUDASlaveEnqueue (ComputeNonbondedCUDA *c, int, int, int, int)
void sendNonbondedCUDASlaveEnqueuePatch (ComputeNonbondedCUDA *c, int, int, int, int, FinishWorkMsg *)
void sendAssignPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvAssignPatchesOnPe (CudaComputeNonbondedMsg *msg)
void sendSkipPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvSkipPatchesOnPe (CudaComputeNonbondedMsg *msg)
void sendFinishPatchesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvFinishPatchesOnPe (CudaComputeNonbondedMsg *msg)
void sendFinishPatchOnPe (int pe, CudaComputeNonbonded *c, int i, PatchID patchID)
void recvFinishPatchOnPe (CudaComputeNonbondedMsg *msg)
void sendOpenBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvOpenBoxesOnPe (CudaComputeNonbondedMsg *msg)
void sendFinishReductions (int pe, CudaComputeNonbonded *c)
void recvFinishReductions (CudaComputeNonbondedMsg *msg)
void sendMessageEnqueueWork (int pe, CudaComputeNonbonded *c)
void recvMessageEnqueueWork (CudaComputeNonbondedMsg *msg)
void sendLaunchWork (int pe, CudaComputeNonbonded *c)
void recvLaunchWork (CudaComputeNonbondedMsg *msg)
void sendUnregisterBoxesOnPe (std::vector< int > &pes, CudaComputeNonbonded *c)
void recvUnregisterBoxesOnPe (CudaComputeNonbondedMsg *msg)
void sendCreateNonbondedMICSlave (int, int)
void recvCreateNonbondedMICSlave (NonbondedMICSlaveMsg *)
void sendNonbondedMICSlaveReady (int, int, int, int)
void recvNonbondedMICSlaveReady (int, int, int)
void sendNonbondedMICSlaveSkip (ComputeNonbondedMIC *c, int)
void recvNonbondedMICSlaveSkip (NonbondedMICSkipMsg *)
void sendNonbondedMICSlaveEnqueue (ComputeNonbondedMIC *c, int, int, int, int)
void sendMICPEData (int, int)
void recvMICPEData (int, int)
int isMICProcessor (int)

Public Attributes

ComputeGlobal * computeGlobalObject
ResizeArray< ComputeGlobalResultsMsg * > computeGlobalResultsMsgs
int computeGlobalResultsMsgSeq
int computeGlobalResultsMsgMasterSeq

Detailed Description

Definition at line 57 of file ComputeMgr.h.


Constructor & Destructor Documentation

ComputeMgr::ComputeMgr (  ) 

Definition at line 109 of file ComputeMgr.C.

References computeGlobalObject, computeGlobalResultsMsgMasterSeq, computeGlobalResultsMsgSeq, and NAMD_die().

00110 {
00111     CkpvAccess(BOCclass_group).computeMgr = thisgroup;
00112     computeGlobalObject = 0;
00113     computeGlobalResultsMsgSeq = -1;
00114     computeGlobalResultsMsgMasterSeq = -1;
00115     computeDPMEObject = 0;
00116     computeEwaldObject = 0;
00117     computeNonbondedCUDAObject = 0;
00118     computeNonbondedMICObject = 0;
00119     computeNonbondedWorkArrays = new ComputeNonbondedWorkArrays;
00120     skipSplitting = 0;
00121 
00122     #if defined(NAMD_MIC)
00123       // Create the micPEData flag array (1 bit per PE) and initially set each PE as "not driving
00124       //   a MIC card" (unset).  PEs that are driving MIC card will identify themselves during startup.
00125       int numPEs = CkNumPes();
00126       int numInts = ((numPEs + (sizeof(int)*8-1)) & (~(sizeof(int)*8-1))) / (sizeof(int)*8);  // Round up to sizeof(int) then divide by the size of an int
00127       micPEData = new int[numInts];
00128       if (micPEData == NULL) { NAMD_die("Unable to allocate memory for micPEData"); }
00129       memset(micPEData, 0, sizeof(int) * numInts);
00130     #else
00131       micPEData = NULL;
00132     #endif
00133 }

ComputeMgr::~ComputeMgr ( void   ) 

Definition at line 135 of file ComputeMgr.C.

00136 {
00137     delete computeNonbondedWorkArrays;
00138 }


Member Function Documentation

void ComputeMgr::createComputes ( ComputeMap *  map  ) 

Definition at line 1007 of file ComputeMgr.C.

References GlobalMasterServer::addClient(), ComputeNonbondedMIC::assignPatches(), ComputeNonbondedCUDA::assignPatches(), CudaComputeNonbonded::assignPatches(), SimParameters::bondedCUDA, SimParameters::colvarsOn, computeAnglesType, computeBondsType, computeCrosstermsType, computeDihedralsType, computeExclsType, computeImpropersType, computeNonbondedCUDA2Type, computeNonbondedCUDAType, computeNonbondedMICType, computeNonbondedPairType, computeNonbondedSelfType, computeSelfAnglesType, computeSelfBondsType, computeSelfCrosstermsType, computeSelfDihedralsType, computeSelfExclsType, computeSelfImpropersType, DebugM, DeviceCUDA::device_shared_with_pe(), deviceCUDA, SimParameters::firstTimestep, SimParameters::freeEnergyOn, getCudaComputeNonbonded(), DeviceCUDA::getMasterPe(), SimParameters::globalForcesOn, SimParameters::IMDignore, SimParameters::IMDignoreForces, SimParameters::IMDon, CudaComputeNonbonded::initialize(), j, mic_device_pe(), mic_device_shared_with_pe(), SimParameters::miscForcesOn, Node::molecule, Node::myid(), Molecule::numAtoms, PatchMap::Object(), Node::Object(), Node::simParameters, SimParameters::SMDDir, SimParameters::SMDFile, SimParameters::SMDk, SimParameters::SMDk2, SimParameters::SMDOn, SimParameters::SMDOutputFreq, SimParameters::SMDVel, SimParameters::symmetryLastStep, SimParameters::symmetryOn, SimParameters::tclForcesOn, SimParameters::TMDOn, ComputeMap::type(), and SimParameters::useCUDA2.

Referenced by Node::startup().

01008 {
01009 // #ifdef NAMD_CUDA
01010 //     int ComputePmeCUDACounter = 0;
01011 // #endif
01012     Node *node = Node::Object();
01013     SimParameters *simParams = node->simParameters;
01014     int myNode = node->myid();
01015 
01016     if ( simParams->globalForcesOn && !myNode )
01017     {
01018         DebugM(4,"Mgr running on Node "<<CkMyPe()<<"\n");
01019         /* create a master server to allow multiple masters */
01020         masterServerObject = new GlobalMasterServer(this,
01021                 PatchMap::Object()->numNodesWithPatches());
01022 
01023         /* create the individual global masters */
01024         // masterServerObject->addClient(new GlobalMasterTest());
01025         if (simParams->tclForcesOn)
01026             masterServerObject->addClient(new GlobalMasterTcl());
01027         if (simParams->IMDon && ! (simParams->IMDignore || simParams->IMDignoreForces) )
01028             masterServerObject->addClient(new GlobalMasterIMD());
01029 
01030         if (simParams->SMDOn)
01031             masterServerObject->addClient(
01032                 new GlobalMasterSMD(simParams->SMDk, simParams->SMDk2,
01033                                     simParams->SMDVel,
01034                                     simParams->SMDDir, simParams->SMDOutputFreq,
01035                                     simParams->firstTimestep, simParams->SMDFile,
01036                                     node->molecule->numAtoms)
01037             );
01038             
01039         if (simParams->symmetryOn && 
01040           (simParams->firstTimestep < simParams->symmetryLastStep || 
01041           simParams->symmetryLastStep == -1))
01042             masterServerObject->addClient(new GlobalMasterSymmetry());    
01043         if (simParams->TMDOn)
01044             masterServerObject->addClient(new GlobalMasterTMD());
01045         if (simParams->miscForcesOn)
01046             masterServerObject->addClient(new GlobalMasterMisc());
01047         if ( simParams->freeEnergyOn )
01048             masterServerObject->addClient(new GlobalMasterFreeEnergy());
01049                 if ( simParams->colvarsOn )
01050                         masterServerObject->addClient(new GlobalMasterColvars());
01051 
01052     }
01053 
01054     if ( !myNode && simParams->IMDon && (simParams->IMDignore || simParams->IMDignoreForces) ) {
01055       // GlobalMasterIMD constructor saves pointer to node->IMDOutput object
01056       new GlobalMasterIMD();
01057     }
01058 
01059 #ifdef NAMD_CUDA
01060     bool deviceIsMine = ( deviceCUDA->getMasterPe() == CkMyPe() );
01061 #ifdef BONDED_CUDA
01062     // Place bonded forces on Pe different from non-bonded forces
01063     int bondedMasterPe = deviceCUDA->getMasterPe();
01064     // for (int i=0;i < deviceCUDA->getNumPesSharingDevice();i++) {
01065     //   int pe = deviceCUDA->getPesSharingDevice(i);
01066     //   if (pe != deviceCUDA->getMasterPe()) {
01067     //     bondedMasterPe = pe;
01068     //   }
01069     // }
01070     bool deviceIsMineBonded = (CkMyPe() == bondedMasterPe);
01071 #endif
01072 #endif
01073 
01074     #ifdef NAMD_MIC
01075       bool deviceIsMine = ( mic_device_pe() == CkMyPe() );
01076     #endif
01077 
01078     for (int i=0; i < map->nComputes; i++)
01079     {
01080         if ( ! ( i % 100 ) )
01081         {
01082         }
01083 
01084 #if defined(NAMD_CUDA) || defined(NAMD_MIC)
01085         switch ( map->type(i) )
01086         {
01087 #ifdef NAMD_CUDA
01088           // case computePmeCUDAType:
01089           //   // Only create single ComputePmeCUDA object per Pe
01090           //  if ( map->computeData[i].node != myNode ) continue;
01091           //  if (ComputePmeCUDACounter > 0) continue;
01092           //  ComputePmeCUDACounter++;
01093           //  break;
01094           case computeNonbondedSelfType:
01095             if ( ! deviceIsMine ) continue;
01096             if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01097           break;
01098 
01099           case computeNonbondedPairType:
01100             if ( ! deviceIsMine ) continue;
01101             if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01102           break;
01103 
01104 #ifdef BONDED_CUDA
01105           case computeSelfBondsType:
01106           case computeBondsType:
01107             if (simParams->bondedCUDA & 1) {
01108               if ( ! deviceIsMineBonded ) continue;
01109               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01110             } else {
01111               if ( map->computeData[i].node != myNode ) continue;
01112             }
01113           break;
01114 
01115           case computeSelfAnglesType:
01116           case computeAnglesType:
01117             if (simParams->bondedCUDA & 2) {
01118               if ( ! deviceIsMineBonded ) continue;
01119               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01120             } else {
01121               if ( map->computeData[i].node != myNode ) continue;
01122             }
01123           break;
01124 
01125           case computeSelfDihedralsType:
01126           case computeDihedralsType:
01127             if (simParams->bondedCUDA & 4) {
01128               if ( ! deviceIsMineBonded ) continue;
01129               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01130             } else {
01131               if ( map->computeData[i].node != myNode ) continue;
01132             }
01133           break;
01134 
01135           case computeSelfImpropersType:
01136           case computeImpropersType:
01137             if (simParams->bondedCUDA & 8) {
01138               if ( ! deviceIsMineBonded ) continue;
01139               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01140             } else {
01141               if ( map->computeData[i].node != myNode ) continue;
01142             }
01143           break;
01144 
01145           case computeSelfExclsType:
01146           case computeExclsType:
01147             if (simParams->bondedCUDA & 16) {
01148               if ( ! deviceIsMineBonded ) continue;
01149               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01150             } else {
01151               if ( map->computeData[i].node != myNode ) continue;
01152             }
01153           break;
01154 
01155           case computeSelfCrosstermsType:
01156           case computeCrosstermsType:
01157             if (simParams->bondedCUDA & 32) {
01158               if ( ! deviceIsMineBonded ) continue;
01159               if ( ! deviceCUDA->device_shared_with_pe(map->computeData[i].node) ) continue;
01160             } else {
01161               if ( map->computeData[i].node != myNode ) continue;
01162             }
01163           break;
01164 
01165           case computeBondedCUDAType:
01166             if ( ! deviceIsMineBonded ) continue;
01167             if ( map->computeData[i].node != myNode ) continue;
01168           break;
01169 #endif
01170 
01171 #endif
01172 #ifdef NAMD_MIC
01173 
01174           case computeNonbondedSelfType:
01175             if (map->directToDevice(i) != 0) { // If should be directed to the device...
01176               if ( ! deviceIsMine ) continue;
01177               if ( ! mic_device_shared_with_pe(map->computeData[i].node) ) continue;
01178             } else { // ... otherwise, direct to host...
01179               if (map->computeData[i].node != myNode) { continue; }
01180             }
01181             break;
01182 
01183           case computeNonbondedPairType:
01184             if (map->directToDevice(i)) { // If should be directed to the device...
01185               if ( ! deviceIsMine ) continue;
01186               if ( ! mic_device_shared_with_pe(map->computeData[i].node) ) continue;
01187             } else { // ... otherwise, direct to host...
01188               if (map->computeData[i].node != myNode) { continue; }
01189             }
01190             break;
01191 
01192 #endif
01193           case computeNonbondedCUDAType:
01194 #ifdef NAMD_CUDA
01195           case computeNonbondedCUDA2Type:
01196 // #ifdef BONDED_CUDA
01197 //           case computeBondedCUDAType:
01198 // #endif
01199 #endif
01200           case computeNonbondedMICType:
01201             if ( ! deviceIsMine ) continue;
01202           default:
01203             if ( map->computeData[i].node != myNode ) continue;
01204         }
01205 #else // defined(NAMD_CUDA) || defined(NAMD_MIC)
01206         if ( map->computeData[i].node != myNode ) continue;
01207 #endif
01208         DebugM(1,"Compute " << i << '\n');
01209         DebugM(1,"  node = " << map->computeData[i].node << '\n');
01210         DebugM(1,"  type = " << map->computeData[i].type << '\n');
01211         DebugM(1,"  numPids = " << map->computeData[i].numPids << '\n');
01212 //         DebugM(1,"  numPidsAllocated = " << map->computeData[i].numPidsAllocated << '\n');
01213         for (int j=0; j < map->computeData[i].numPids; j++)
01214         {
01215             DebugM(1,"  pid " << map->computeData[i].pids[j].pid << '\n');
01216             if (!((j+1) % 6))
01217                 DebugM(1,'\n');
01218         }
01219         DebugM(1,"\n---------------------------------------");
01220         DebugM(1,"---------------------------------------\n");
01221 
01222         createCompute(i, map);
01223 
01224     }
01225 
01226 #ifdef NAMD_CUDA
01227     if (simParams->useCUDA2) {
01228       if (deviceIsMine) {
01229         getCudaComputeNonbonded()->assignPatches(this);
01230         getCudaComputeNonbonded()->initialize();
01231       }
01232     } else {
01233       if ( computeNonbondedCUDAObject ) {
01234         computeNonbondedCUDAObject->assignPatches();
01235       }      
01236     }
01237 #ifdef BONDED_CUDA
01238     if (simParams->bondedCUDA) {
01239       if (deviceIsMineBonded) {
01240         getComputeBondedCUDA()->initialize();
01241       }
01242     }
01243 #endif
01244 #endif
01245 #ifdef NAMD_MIC
01246     if ( computeNonbondedMICObject ) {
01247       computeNonbondedMICObject->assignPatches();
01248     }
01249 #endif
01250 
01251 }

void ComputeMgr::doneUpdateLocalComputes (  ) 

Definition at line 346 of file ComputeMgr.C.

References DebugM.

00347 {
00348 
00349 //  if (!--updateComputesCount) {
00350     DebugM(4, "doneUpdateLocalComputes on Pe("<<CkMyPe()<<")\n");
00351     void *msg = CkAllocMsg(0,0,0);
00352     CkSendMsgBranch(updateComputesReturnEP,msg,0,updateComputesReturnChareID);
00353 //  }
00354 }

void ComputeMgr::enableComputeGlobalResults (  ) 
int ComputeMgr::isMICProcessor ( int  pe  ) 

Definition at line 1883 of file ComputeMgr.C.

01883                                      {
01884   if (pe < 0 || pe >= CkNumPes() || micPEData == NULL) { return 0; }
01885   int majorIndex = pe / (sizeof(int)*8);
01886   int minorIndex = pe % (sizeof(int)*8);
01887   return ((micPEData[majorIndex] >> minorIndex) & 0x01);
01888 }

void ComputeMgr::recvAssignPatchesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1592 of file ComputeMgr.C.

References CudaComputeNonbonded::assignPatchesOnPe(), and CudaComputeNonbondedMsg::c.

01592                                                                    {
01593   msg->c->assignPatchesOnPe();
01594   delete msg;
01595 }

void ComputeMgr::recvBuildCudaExclusions (  ) 

Definition at line 1459 of file ComputeMgr.C.

References build_cuda_exclusions().

01459                                          {
01460 #ifdef NAMD_CUDA
01461     build_cuda_exclusions();
01462 #endif
01463 }

void ComputeMgr::recvBuildCudaForceTable (  ) 

Definition at line 1478 of file ComputeMgr.C.

References build_cuda_force_table().

01478                                          {
01479 #ifdef NAMD_CUDA
01480     build_cuda_force_table();
01481 #endif
01482 }

void ComputeMgr::recvBuildMICForceTable (  ) 

Definition at line 1497 of file ComputeMgr.C.

01497                                         {
01498   #ifdef NAMD_MIC
01499     build_mic_force_table();
01500   #endif
01501 }

void ComputeMgr::recvComputeConsForceMsg ( ComputeConsForceMsg *  msg  ) 

Definition at line 1406 of file ComputeMgr.C.

References ComputeConsForceMsg::aid, Molecule::consForce, Molecule::consForceIndexes, ComputeConsForceMsg::f, for(), Node::molecule, Molecule::numAtoms, Node::Object(), and ResizeArray< Elem >::size().

01407 {
01408     Molecule *m = Node::Object()->molecule;
01409     delete [] m->consForceIndexes;
01410     delete [] m->consForce;
01411     int n = msg->aid.size();
01412     if (n > 0)
01413     {
01414         m->consForceIndexes = new int32[m->numAtoms];
01415         m->consForce = new Vector[n];
01416         int i;
01417         for (i=0; i<m->numAtoms; i++) m->consForceIndexes[i] = -1;
01418         for (i=0; i<msg->aid.size(); i++)
01419         {
01420             m->consForceIndexes[msg->aid[i]] = i;
01421             m->consForce[i] = msg->f[i];
01422         }
01423     }
01424     else
01425     {
01426         m->consForceIndexes = NULL;
01427         m->consForce = NULL;
01428     }
01429     delete msg;
01430 }

void ComputeMgr::recvComputeDPMEData ( ComputeDPMEDataMsg *  msg  ) 

Definition at line 1376 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

01377 {
01378     if ( computeDPMEObject )
01379     {
01380 #ifdef DPME
01381         computeDPMEObject->recvData(msg);
01382 #endif
01383     }
01384     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01385     else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
01386 }

void ComputeMgr::recvComputeDPMEResults ( ComputeDPMEResultsMsg *  msg  ) 

Definition at line 1394 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

01395 {
01396     if ( computeDPMEObject )
01397     {
01398 #ifdef DPME
01399         computeDPMEObject->recvResults(msg);
01400 #endif
01401     }
01402     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01403     else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
01404 }

void ComputeMgr::recvComputeEwaldData ( ComputeEwaldMsg *  msg  ) 

Definition at line 1339 of file ComputeMgr.C.

References NAMD_die(), and ComputeEwald::recvData().

01340 {
01341     if (computeEwaldObject)
01342         computeEwaldObject->recvData(msg);
01343     else NAMD_die("ComputeMgr::computeEwaldObject in recvData is NULL!");
01344 }

void ComputeMgr::recvComputeEwaldResults ( ComputeEwaldMsg *  msg  ) 

Definition at line 1351 of file ComputeMgr.C.

References NAMD_die(), PatchMap::Object(), and ComputeEwald::recvResults().

Referenced by sendComputeEwaldResults().

01352 {
01353     if (computeEwaldObject) {
01354         CmiEnableUrgentSend(1);
01355         computeEwaldObject->recvResults(msg);
01356         CmiEnableUrgentSend(0);
01357     }
01358     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01359     else NAMD_die("ComputeMgr::computeEwaldObject in recvResults is NULL!");
01360 }

void ComputeMgr::recvComputeGlobalConfig ( ComputeGlobalConfigMsg *   ) 
void ComputeMgr::recvComputeGlobalData ( ComputeGlobalDataMsg *  msg  ) 

Definition at line 1276 of file ComputeMgr.C.

References NAMD_die(), and GlobalMasterServer::recvData().

01277 {
01278     if (masterServerObject)  // make sure it has been initialized
01279     {
01280         masterServerObject->recvData(msg);
01281     }
01282     else NAMD_die("ComputeMgr::masterServerObject is NULL!");
01283 }

void ComputeMgr::recvComputeGlobalResults ( ComputeGlobalResultsMsg *  msg  ) 

Definition at line 1304 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), computeGlobalObject, computeGlobalResultsMsgs, computeGlobalResultsMsgSeq, NAMD_die(), PatchMap::Object(), ComputeGlobal::recvResults(), and ComputeGlobalResultsMsg::seq.

Referenced by enableComputeGlobalResults().

01305 {
01306     if ( computeGlobalObject )
01307     {
01308       if ( msg->seq == computeGlobalResultsMsgSeq ) {
01309         CmiEnableUrgentSend(1);
01310         computeGlobalObject->recvResults(msg);
01311         CmiEnableUrgentSend(0);
01312       } else {
01313         computeGlobalResultsMsgs.add(msg);
01314       }
01315     }
01316     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01317     else NAMD_die("ComputeMgr::computeGlobalObject is NULL!");
01318 }

void ComputeMgr::recvCreateNonbondedCUDASlave ( NonbondedCUDASlaveMsg *  msg  ) 

Definition at line 1517 of file ComputeMgr.C.

References Compute::cid, NonbondedCUDASlaveMsg::index, and NonbondedCUDASlaveMsg::master.

01517                                                                         {
01518 #ifdef NAMD_CUDA
01519   new ComputeNonbondedCUDA(msg->master->cid,this,msg->master,msg->index);
01520 #endif
01521 }

void ComputeMgr::recvCreateNonbondedMICSlave ( NonbondedMICSlaveMsg *  msg  ) 

Definition at line 1817 of file ComputeMgr.C.

References Compute::cid, NonbondedMICSlaveMsg::index, and NonbondedMICSlaveMsg::master.

01817                                                                       {
01818 #ifdef NAMD_MIC
01819   ComputeNonbondedMIC *c = new ComputeNonbondedMIC(msg->master->cid,this,msg->master,msg->index);
01820 #endif
01821 }

void ComputeMgr::recvFinishPatchesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1619 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishPatchesOnPe().

01619                                                                    {
01620   msg->c->finishPatchesOnPe();
01621   delete msg;
01622 }

void ComputeMgr::recvFinishPatchOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1632 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, CudaComputeNonbonded::finishPatchOnPe(), and CudaComputeNonbondedMsg::i.

01632                                                                  {
01633   msg->c->finishPatchOnPe(msg->i);
01634   delete msg;
01635 }

void ComputeMgr::recvFinishReductions ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1657 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::finishReductions().

01657                                                                   {
01658   msg->c->finishReductions();
01659   delete msg;
01660 }

void ComputeMgr::recvLaunchWork ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1679 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::launchWork().

01679                                                             {
01680   msg->c->launchWork();
01681   delete msg;
01682 }

void ComputeMgr::recvMessageEnqueueWork ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1668 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::messageEnqueueWork().

01668                                                                     {
01669   msg->c->messageEnqueueWork();
01670   delete msg;
01671 }

void ComputeMgr::recvMICPEData ( int  pe,
int  data 
)

Definition at line 1868 of file ComputeMgr.C.

NOTE (review): in the listing below, the clear branch computes micPEData[majorIndex] &= ((~0x01) << minorIndex), which also clears every bit below minorIndex. The intended single-bit clear mask is ~(0x01 << minorIndex) — confirm and fix in ComputeMgr.C.

01868                                                {
01869   if (pe < 0 || pe >= CkNumPes() || micPEData == NULL) { return; }
01870   int majorIndex = pe / (sizeof(int)*8);
01871   int minorIndex = pe % (sizeof(int)*8);
01872   if (data != 0) {
01873     micPEData[majorIndex] |= (0x01 << minorIndex);
01874   } else {
01875     micPEData[majorIndex] &= ((~0x01) << minorIndex);
01876   }
01877 }

void ComputeMgr::recvNonbondedCUDASlaveReady ( int  np,
int  ac,
int  seq 
)

Definition at line 1528 of file ComputeMgr.C.

References Compute::patchReady().

01528                                                                     {
01529   for ( int i=0; i<np; ++i ) {
01530     computeNonbondedCUDAObject->patchReady(-1,ac,seq);
01531   }
01532 }

void ComputeMgr::recvNonbondedCUDASlaveSkip ( NonbondedCUDASkipMsg *  msg  ) 

Definition at line 1545 of file ComputeMgr.C.

References NonbondedCUDASkipMsg::compute, and ComputeNonbondedCUDA::skip().

01545                                                                      {
01546 #ifdef NAMD_CUDA
01547   msg->compute->skip();
01548 #endif
01549   delete msg;
01550 }

void ComputeMgr::recvNonbondedMICSlaveReady ( int  np,
int  ac,
int  seq 
)

Definition at line 1828 of file ComputeMgr.C.

References Compute::patchReady().

01828                                                                    {
01829   for ( int i=0; i<np; ++i ) {
01830     computeNonbondedMICObject->patchReady(-1,ac,seq);
01831   }
01832 }

void ComputeMgr::recvNonbondedMICSlaveSkip ( NonbondedMICSkipMsg *  msg  ) 

Definition at line 1845 of file ComputeMgr.C.

References NonbondedMICSkipMsg::compute, and ComputeNonbondedMIC::skip().

01845                                                                    {
01846 #ifdef NAMD_MIC
01847   msg->compute->skip();
01848 #endif
01849   delete msg;
01850 }

void ComputeMgr::recvOpenBoxesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1646 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::openBoxesOnPe().

01646                                                                {
01647   msg->c->openBoxesOnPe();
01648   delete msg;
01649 }

void ComputeMgr::recvSkipPatchesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1605 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::skipPatchesOnPe().

01605                                                                  {
01606   msg->c->skipPatchesOnPe();
01607   delete msg;
01608 }

void ComputeMgr::recvUnregisterBoxesOnPe ( CudaComputeNonbondedMsg *  msg  ) 

Definition at line 1692 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, and CudaComputeNonbonded::unregisterBoxesOnPe().

01692                                                                      {
01693   msg->c->unregisterBoxesOnPe();
01694   delete msg;
01695 }

void ComputeMgr::recvYieldDevice ( int  pe  ) 

Definition at line 1437 of file ComputeMgr.C.

References ComputeNonbondedMIC::recvYieldDevice(), and ComputeNonbondedCUDA::recvYieldDevice().

01437                                        {
01438 #ifdef NAMD_CUDA
01439     computeNonbondedCUDAObject->recvYieldDevice(pe);
01440 #endif
01441 #ifdef NAMD_MIC
01442     computeNonbondedMICObject->recvYieldDevice(pe);
01443 #endif
01444 }

void ComputeMgr::sendAssignPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1584 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::assignPatches().

01584                                                                                    {
01585   for (int i=0;i < pes.size();i++) {
01586     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01587     msg->c = c;
01588     thisProxy[pes[i]].recvAssignPatchesOnPe(msg);
01589   }
01590 }

void ComputeMgr::sendBuildCudaExclusions (  ) 

Definition at line 1446 of file ComputeMgr.C.

Referenced by Node::resendMolecule().

01446                                          {
01447     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01448     int pe = CkNodeFirst(CkMyNode());
01449     int end = pe + CkNodeSize(CkMyNode());
01450     for( ; pe != end; ++pe ) {
01451       cm[pe].recvBuildCudaExclusions();
01452     }
01453 }

void ComputeMgr::sendBuildCudaForceTable (  ) 

Definition at line 1465 of file ComputeMgr.C.

Referenced by send_build_cuda_force_table().

01465                                          {
01466     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01467     int pe = CkNodeFirst(CkMyNode());
01468     int end = pe + CkNodeSize(CkMyNode());
01469     for( ; pe != end; ++pe ) {
01470       cm[pe].recvBuildCudaForceTable();
01471     }
01472 }

void ComputeMgr::sendBuildMICForceTable (  ) 

Definition at line 1484 of file ComputeMgr.C.

01484                                         {
01485   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01486   int pe = CkNodeFirst(CkMyNode());
01487   int end = pe + CkNodeSize(CkMyNode());
01488   for( ; pe != end; ++pe ) {
01489     cm[pe].recvBuildMICForceTable();
01490   }
01491 }

void ComputeMgr::sendComputeDPMEData ( ComputeDPMEDataMsg *  msg  ) 

Definition at line 1362 of file ComputeMgr.C.

References NAMD_die(), and PatchMap::Object().

01363 {
01364     if ( computeDPMEObject )
01365     {
01366 #ifdef DPME
01367         int node = computeDPMEObject->getMasterNode();
01368         CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01369         cm.recvComputeDPMEData(msg,node);
01370 #endif
01371     }
01372     else if ( ! (PatchMap::Object())->numHomePatches() ) delete msg;
01373     else NAMD_die("ComputeMgr::computeDPMEObject is NULL!");
01374 }

void ComputeMgr::sendComputeDPMEResults ( ComputeDPMEResultsMsg *  msg,
int  node 
)

Definition at line 1388 of file ComputeMgr.C.

01389 {
01390     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01391     cm[node].recvComputeDPMEResults(msg);
01392 }

void ComputeMgr::sendComputeEwaldData ( ComputeEwaldMsg *  msg  ) 

Definition at line 1323 of file ComputeMgr.C.

References ComputeEwald::getMasterNode(), NAMD_die(), and PatchMap::Object().

Referenced by ComputeEwald::doWork().

01324 {
01325     if (computeEwaldObject)
01326     {
01327         int node = computeEwaldObject->getMasterNode();
01328         CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01329         cm[node].recvComputeEwaldData(msg);
01330     }
01331     else if (!PatchMap::Object()->numHomePatches())
01332     {
01333         CkPrintf("skipping message on Pe(%d)\n", CkMyPe());
01334         delete msg;
01335     }
01336     else NAMD_die("ComputeMgr::computeEwaldObject is NULL!");
01337 }

void ComputeMgr::sendComputeEwaldResults ( ComputeEwaldMsg *  msg  ) 

Definition at line 1346 of file ComputeMgr.C.

References recvComputeEwaldResults().

Referenced by ComputeEwald::recvData().

01347 {
01348     (CProxy_ComputeMgr(CkpvAccess(BOCclass_group).computeMgr)).recvComputeEwaldResults(msg);
01349 }

void ComputeMgr::sendComputeGlobalConfig ( ComputeGlobalConfigMsg *   ) 
void ComputeMgr::sendComputeGlobalData ( ComputeGlobalDataMsg *  msg  ) 

Definition at line 1270 of file ComputeMgr.C.

Referenced by ComputeGlobal::doWork().

01271 {
01272     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01273     cm[0].recvComputeGlobalData(msg);
01274 }

void ComputeMgr::sendComputeGlobalResults ( ComputeGlobalResultsMsg *  msg  ) 

Definition at line 1285 of file ComputeMgr.C.

References computeGlobalResultsMsgMasterSeq, and ComputeGlobalResultsMsg::seq.

01286 {
01287     msg->seq = ++computeGlobalResultsMsgMasterSeq;
01288     thisProxy.recvComputeGlobalResults(msg);
01289 }

void ComputeMgr::sendCreateNonbondedCUDASlave ( int  pe,
int  index 
)

Definition at line 1509 of file ComputeMgr.C.

References NonbondedCUDASlaveMsg::index, and NonbondedCUDASlaveMsg::master.

Referenced by ComputeNonbondedCUDA::assignPatches().

01509                                                                {
01510   NonbondedCUDASlaveMsg *msg = new NonbondedCUDASlaveMsg;
01511   msg->master = computeNonbondedCUDAObject;
01512   msg->index = index;
01513   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01514   cm[pe].recvCreateNonbondedCUDASlave(msg);
01515 }

void ComputeMgr::sendCreateNonbondedMICSlave ( int  pe,
int  index 
)

Definition at line 1809 of file ComputeMgr.C.

References NonbondedMICSlaveMsg::index, and NonbondedMICSlaveMsg::master.

01809                                                               {
01810   NonbondedMICSlaveMsg *msg = new NonbondedMICSlaveMsg;
01811   msg->master = computeNonbondedMICObject;
01812   msg->index = index;
01813   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01814   cm[pe].recvCreateNonbondedMICSlave(msg);
01815 }

void ComputeMgr::sendFinishPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1610 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, COMPUTE_PROXY_PRIORITY, PRIORITY_SIZE, Compute::sequence(), and SET_PRIORITY.

01610                                                                                    {
01611   for (int i=0;i < pes.size();i++) {
01612     CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
01613     SET_PRIORITY(msg, c->sequence(), COMPUTE_PROXY_PRIORITY);
01614     msg->c = c;
01615     thisProxy[pes[i]].recvFinishPatchesOnPe(msg);
01616   }
01617 }

void ComputeMgr::sendFinishPatchOnPe ( int  pe,
CudaComputeNonbonded *  c,
int  i,
PatchID  patchID 
)

Definition at line 1624 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, COMPUTE_PROXY_PRIORITY, CudaComputeNonbondedMsg::i, PATCH_PRIORITY, PRIORITY_SIZE, Compute::sequence(), and SET_PRIORITY.

01624                                                                                             {
01625   CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
01626   SET_PRIORITY(msg, c->sequence(), COMPUTE_PROXY_PRIORITY + PATCH_PRIORITY(patchID));
01627   msg->c = c;
01628   msg->i = i;
01629   thisProxy[pe].recvFinishPatchOnPe(msg);
01630 }

void ComputeMgr::sendFinishReductions ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1651 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::skipPatchesOnPe().

01651                                                                      {
01652   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01653   msg->c = c;
01654   thisProxy[pe].recvFinishReductions(msg);
01655 }

void ComputeMgr::sendLaunchWork ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1673 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::openBoxesOnPe().

01673                                                                {
01674   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01675   msg->c = c;
01676   thisProxy[pe].recvLaunchWork(msg);
01677 }

void ComputeMgr::sendMessageEnqueueWork ( int  pe,
CudaComputeNonbonded *  c 
)

Definition at line 1662 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::noWork().

01662                                                                        {
01663   CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01664   msg->c = c;
01665   thisProxy[pe].recvMessageEnqueueWork(msg);
01666 }

void ComputeMgr::sendMICPEData ( int  pe,
int  data 
)

Definition at line 1863 of file ComputeMgr.C.

01863                                                {
01864   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01865   cm.recvMICPEData(pe, data);
01866 }

void ComputeMgr::sendNonbondedCUDASlaveEnqueue ( ComputeNonbondedCUDA *  c,
int  pe,
int  seq,
int  prio,
int  ws 
)

Definition at line 1552 of file ComputeMgr.C.

References Compute::cid, LocalWorkMsg::compute, ComputeNonbondedCUDA::localHostedPatches, Compute::localWorkMsg, ComputeNonbondedCUDA::localWorkMsg2, SET_PRIORITY, ResizeArray< Elem >::size(), and Compute::type().

Referenced by ComputeNonbondedCUDA::finishWork().

01552                                                                                                          {
01553   if ( ws == 2 && c->localHostedPatches.size() == 0 ) return;
01554   LocalWorkMsg *msg = ( ws == 1 ? c->localWorkMsg : c->localWorkMsg2 );
01555   msg->compute = c;
01556   int type = c->type();
01557   int cid = c->cid;
01558   SET_PRIORITY(msg,seq,prio);
01559   CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
01560   wdProxy[pe].enqueueCUDA(msg);
01561 }

void ComputeMgr::sendNonbondedCUDASlaveEnqueuePatch ( ComputeNonbondedCUDA *  c,
int  pe,
int  seq,
int  prio,
int  data,
FinishWorkMsg *  msg 
)

Definition at line 1563 of file ComputeMgr.C.

References FinishWorkMsg::compute, FinishWorkMsg::data, and SET_PRIORITY.

Referenced by ComputeNonbondedCUDA::messageFinishPatch().

01563                                                                                                                                     {
01564   msg->compute = c;
01565   msg->data = data;
01566   SET_PRIORITY(msg,seq,prio);
01567   CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
01568   wdProxy[pe].finishCUDAPatch(msg);
01569 }

void ComputeMgr::sendNonbondedCUDASlaveReady ( int  pe,
int  np,
int  ac,
int  seq 
)

Definition at line 1523 of file ComputeMgr.C.

Referenced by ComputeNonbondedCUDA::noWork().

01523                                                                             {
01524   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01525   cm[pe].recvNonbondedCUDASlaveReady(np,ac,seq);
01526 }

void ComputeMgr::sendNonbondedCUDASlaveSkip ( ComputeNonbondedCUDA *  c,
int  pe 
)

Definition at line 1539 of file ComputeMgr.C.

References NonbondedCUDASkipMsg::compute.

Referenced by ComputeNonbondedCUDA::noWork().

01539                                                                            {
01540   NonbondedCUDASkipMsg *msg = new NonbondedCUDASkipMsg;
01541   msg->compute = c;
01542   thisProxy[pe].recvNonbondedCUDASlaveSkip(msg);
01543 }

void ComputeMgr::sendNonbondedMICSlaveEnqueue ( ComputeNonbondedMIC *  c,
int  pe,
int  seq,
int  prio,
int  ws 
)

Definition at line 1852 of file ComputeMgr.C.

References Compute::cid, LocalWorkMsg::compute, ComputeNonbondedMIC::localHostedPatches, Compute::localWorkMsg, ComputeNonbondedMIC::localWorkMsg2, SET_PRIORITY, ResizeArray< Elem >::size(), and Compute::type().

01852                                                                                                        {
01853   if ( ws == 2 && c->localHostedPatches.size() == 0 ) return;
01854   LocalWorkMsg *msg = ( ws == 1 ? c->localWorkMsg : c->localWorkMsg2 );
01855   msg->compute = c;
01856   int type = c->type();
01857   int cid = c->cid;
01858   SET_PRIORITY(msg,seq,prio);
01859   CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
01860   wdProxy[pe].enqueueMIC(msg);
01861 }

void ComputeMgr::sendNonbondedMICSlaveReady ( int  pe,
int  np,
int  ac,
int  seq 
)

Definition at line 1823 of file ComputeMgr.C.

01823                                                                            {
01824   CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01825   cm[pe].recvNonbondedMICSlaveReady(np,ac,seq);
01826 }

void ComputeMgr::sendNonbondedMICSlaveSkip ( ComputeNonbondedMIC *  c,
int  pe 
)

Definition at line 1839 of file ComputeMgr.C.

References NonbondedMICSkipMsg::compute.

01839                                                                          {
01840   NonbondedMICSkipMsg *msg = new NonbondedMICSkipMsg;
01841   msg->compute = c;
01842   thisProxy[pe].recvNonbondedMICSlaveSkip(msg);
01843 }

void ComputeMgr::sendOpenBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1637 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c, PRIORITY_SIZE, PROXY_DATA_PRIORITY, Compute::sequence(), and SET_PRIORITY.

Referenced by CudaComputeNonbonded::doWork().

01637                                                                                {
01638   for (int i=0;i < pes.size();i++) {
01639     CudaComputeNonbondedMsg *msg = new (PRIORITY_SIZE) CudaComputeNonbondedMsg;
01640     SET_PRIORITY(msg, c->sequence(), PROXY_DATA_PRIORITY+1); // after bonded
01641     msg->c = c;
01642     thisProxy[pes[i]].recvOpenBoxesOnPe(msg);
01643   }
01644 }

void ComputeMgr::sendSkipPatchesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1597 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

01597                                                                                  {
01598   for (int i=0;i < pes.size();i++) {
01599     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01600     msg->c = c;
01601     thisProxy[pes[i]].recvSkipPatchesOnPe(msg);
01602   }
01603 }

void ComputeMgr::sendUnregisterBoxesOnPe ( std::vector< int > &  pes,
CudaComputeNonbonded *  c 
)

Definition at line 1684 of file ComputeMgr.C.

References CudaComputeNonbondedMsg::c.

Referenced by CudaComputeNonbonded::~CudaComputeNonbonded().

01684                                                                                      {
01685   for (int i=0;i < pes.size();i++) {
01686     CudaComputeNonbondedMsg *msg = new CudaComputeNonbondedMsg;
01687     msg->c = c;
01688     thisProxy[pes[i]].recvUnregisterBoxesOnPe(msg);
01689   }
01690 }

void ComputeMgr::sendYieldDevice ( int  pe  ) 

Definition at line 1432 of file ComputeMgr.C.

Referenced by cuda_check_local_calc(), and cuda_check_remote_calc().

01432                                        {
01433     CProxy_ComputeMgr cm(CkpvAccess(BOCclass_group).computeMgr);
01434     cm[pe].recvYieldDevice(CkMyPe());
01435 }

void ComputeMgr::splitComputes (  ) 

Definition at line 173 of file ComputeMgr.C.

References ComputeMap::cloneCompute(), ComputeMap::extendPtrs(), j, ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPartitions(), ComputeMap::Object(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), and ComputeMap::setNumPartitions().

00174 {
00175   if ( ! CkMyRank() ) {
00176     ComputeMap *computeMap = ComputeMap::Object();
00177     const int nc = computeMap->numComputes();
00178 
00179     for (int i=0; i<nc; i++) {
00180       int nnp = computeMap->newNumPartitions(i);
00181       if ( nnp > 0 ) {
00182         if ( computeMap->numPartitions(i) != 1 ) {
00183           CkPrintf("Warning: unable to partition compute %d\n", i);
00184           computeMap->setNewNumPartitions(i,0);
00185           continue;
00186         }
00187         //CkPrintf("splitting compute %d by %d\n",i,nnp);
00188         computeMap->setNumPartitions(i,nnp);
00189         if (computeMap->newNode(i) == -1) {
00190           computeMap->setNewNode(i,computeMap->node(i));
00191         }
00192         for ( int j=1; j<nnp; ++j ) {
00193           int newcid = computeMap->cloneCompute(i,j);
00194           //CkPrintf("compute %d partition %d is %d\n",i,j,newcid);
00195         }
00196       }
00197     }
00198     computeMap->extendPtrs();
00199   }
00200 
00201   if (!CkMyPe())
00202   {
00203     CkStartQD(CkIndex_ComputeMgr::splitComputes2((CkQdMsg*)0), &thishandle);
00204   }
00205 }

void ComputeMgr::splitComputes2 ( CkQdMsg *  msg  ) 

Definition at line 207 of file ComputeMgr.C.

00208 {
00209     delete msg;
00210     CProxy_ComputeMgr(thisgroup).updateLocalComputes();
00211 }

void ComputeMgr::updateComputes ( int  ep,
CkGroupID  chareID 
)

Definition at line 140 of file ComputeMgr.C.

References NAMD_bug().

Referenced by LdbCoordinator::ExecuteMigrations().

00141 {
00142     updateComputesReturnEP = ep;
00143     updateComputesReturnChareID = chareID;
00144     updateComputesCount = CkNumPes();
00145 
00146     if (CkMyPe())
00147     {
00148         NAMD_bug("updateComputes signaled on wrong Pe!");
00149     }
00150 
00151     CkStartQD(CkIndex_ComputeMgr::updateComputes2((CkQdMsg*)0),&thishandle);
00152 }

void ComputeMgr::updateComputes2 ( CkQdMsg *  msg  ) 

Definition at line 154 of file ComputeMgr.C.

References WorkDistrib::saveComputeMapChanges().

00155 {
00156     delete msg;
00157 
00158     CProxy_WorkDistrib wd(CkpvAccess(BOCclass_group).workDistrib);
00159     WorkDistrib  *workDistrib = wd.ckLocalBranch();
00160     workDistrib->saveComputeMapChanges(CkIndex_ComputeMgr::updateComputes3(),thisgroup);
00161 }

void ComputeMgr::updateComputes3 (  ) 

Definition at line 163 of file ComputeMgr.C.

00164 {
00165     if ( skipSplitting ) {
00166       CProxy_ComputeMgr(thisgroup).updateLocalComputes();
00167     } else {
00168       CProxy_ComputeMgr(thisgroup).splitComputes();
00169       skipSplitting = 1;
00170     }
00171 }

void ComputeMgr::updateLocalComputes (  ) 

Definition at line 213 of file ComputeMgr.C.

References ResizeArray< Elem >::add(), ComputeMap::compute(), ProxyMgr::createProxy(), Compute::ldObjHandle, LdbCoordinator::Migrate(), ComputeMap::newNode(), ComputeMap::newNumPartitions(), ComputeMap::node(), ComputeMap::numComputes(), ComputeMap::numPids(), LdbCoordinator::Object(), ComputeMap::Object(), ComputeMap::pid(), ComputeMap::registerCompute(), and ResizeArray< Elem >::resize().

00214 {
00215     ComputeMap *computeMap = ComputeMap::Object();
00216     CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
00217     ProxyMgr *proxyMgr = pm.ckLocalBranch();
00218     LdbCoordinator *ldbCoordinator = LdbCoordinator::Object();
00219 
00220      computeFlag.resize(0);
00221 
00222     const int nc = computeMap->numComputes();
00223     for (int i=0; i<nc; i++) {
00224 
00225         if ( computeMap->node(i) == CkMyPe() &&
00226              computeMap->newNumPartitions(i) > 1 ) {
00227            Compute *c = computeMap->compute(i);
00228            ldbCoordinator->Migrate(c->ldObjHandle,CkMyPe());
00229            delete c;
00230            computeMap->registerCompute(i,NULL);
00231            if ( computeMap->newNode(i) == CkMyPe() ) computeFlag.add(i); 
00232         } else
00233         if (computeMap->newNode(i) == CkMyPe() && computeMap->node(i) != CkMyPe())
00234         {
00235             computeFlag.add(i);
00236             for (int n=0; n < computeMap->numPids(i); n++)
00237             {
00238                 proxyMgr->createProxy(computeMap->pid(i,n));
00239             }
00240         }
00241         else if (computeMap->node(i) == CkMyPe() &&
00242                  (computeMap->newNode(i) != -1 && computeMap->newNode(i) != CkMyPe() ))
00243         {
00244             // CkPrintf("delete compute %d on pe %d\n",i,CkMyPe());
00245             delete computeMap->compute(i);
00246             computeMap->registerCompute(i,NULL);
00247         }
00248     }
00249 
00250     if (!CkMyPe())
00251     {
00252         CkStartQD(CkIndex_ComputeMgr::updateLocalComputes2((CkQdMsg*)0), &thishandle);
00253     }
00254 }

void ComputeMgr::updateLocalComputes2 ( CkQdMsg *  msg  ) 

Definition at line 257 of file ComputeMgr.C.

00258 {
00259     delete msg;
00260     CProxy_ComputeMgr(thisgroup).updateLocalComputes3();
00261 }

void ComputeMgr::updateLocalComputes3 (  ) 

Definition at line 264 of file ComputeMgr.C.

References ResizeArray< Elem >::clear(), ComputeMap::newNode(), ProxyMgr::nodecount, ComputeMap::numComputes(), ComputeMap::Object(), ProxyMgr::removeUnusedProxies(), ComputeMap::setNewNode(), ComputeMap::setNewNumPartitions(), ComputeMap::setNode(), and ResizeArray< Elem >::size().

00265 {
00266     ComputeMap *computeMap = ComputeMap::Object();
00267     CProxy_ProxyMgr pm(CkpvAccess(BOCclass_group).proxyMgr);
00268     ProxyMgr *proxyMgr = pm.ckLocalBranch();
00269 
00270     ProxyMgr::nodecount = 0;
00271 
00272     const int nc = computeMap->numComputes();
00273 
00274     if ( ! CkMyRank() ) {
00275       for (int i=0; i<nc; i++) {
00276         computeMap->setNewNumPartitions(i,0);
00277         if (computeMap->newNode(i) != -1) {
00278           computeMap->setNode(i,computeMap->newNode(i));
00279           computeMap->setNewNode(i,-1);
00280         }
00281       }
00282     }
00283  
00284     for(int i=0; i<computeFlag.size(); i++) createCompute(computeFlag[i], computeMap);
00285     computeFlag.clear();
00286 
00287     proxyMgr->removeUnusedProxies();
00288 
00289     if (!CkMyPe())
00290     {
00291         CkStartQD(CkIndex_ComputeMgr::updateLocalComputes4((CkQdMsg*)0), &thishandle);
00292     }
00293 }

void ComputeMgr::updateLocalComputes4 ( CkQdMsg *  msg  ) 

Definition at line 296 of file ComputeMgr.C.

References SimParameters::computeMapFilename, ComputeMap::Object(), Node::Object(), ComputeMap::saveComputeMap(), Node::simParameters, simParams, and SimParameters::storeComputeMap.

00297 {
00298     delete msg;
00299     CProxy_ComputeMgr(thisgroup).updateLocalComputes5();
00300 
00301     // store the latest compute map
00302            SimParameters *simParams = Node::Object()->simParameters;
00303     if (simParams->storeComputeMap) {
00304       ComputeMap *computeMap = ComputeMap::Object();
00305       computeMap->saveComputeMap(simParams->computeMapFilename);
00306     }
00307 }

void ComputeMgr::updateLocalComputes5 (  ) 

Definition at line 314 of file ComputeMgr.C.

References ProxyMgr::buildProxySpanningTree2(), PatchMap::checkMap(), ComputeMap::checkMap(), ProxyMgr::Object(), PatchMap::Object(), ComputeMap::Object(), proxyRecvSpanning, proxySendSpanning, and ProxyMgr::sendSpanningTrees().

00315 {
00316     if ( ! CkMyRank() ) {
00317       ComputeMap::Object()->checkMap();
00318       PatchMap::Object()->checkMap();
00319     }
00320 
00321     // we always use the centralized building of spanning tree
00322     // distributed building of ST called in Node.C only
00323     if (proxySendSpanning || proxyRecvSpanning)
00324         ProxyMgr::Object()->buildProxySpanningTree2();
00325 
00326     // this code needs to be turned on if we want to
00327     // shift the creation of ST to the load balancer
00328 
00329 #if 0
00330     if (proxySendSpanning || proxyRecvSpanning)
00331     {
00332         if (firstphase)
00333             ProxyMgr::Object()->buildProxySpanningTree2();
00334         else
00335             if (CkMyPe() == 0)
00336                 ProxyMgr::Object()->sendSpanningTrees();
00337 
00338         firstphase = 0;
00339     }
00340 #endif
00341 
00342     if (!CkMyPe())
00343         CkStartQD(CkIndex_ComputeMgr::doneUpdateLocalComputes(), &thishandle);
00344 }


Member Data Documentation

Definition at line 100 of file ComputeMgr.h.

Referenced by ComputeMgr(), and sendComputeGlobalResults().

Definition at line 98 of file ComputeMgr.h.

Referenced by enableComputeGlobalResults(), and recvComputeGlobalResults().

Definition at line 99 of file ComputeMgr.h.

Referenced by ComputeMgr(), enableComputeGlobalResults(), and recvComputeGlobalResults().


The documentation for this class was generated from the following files:

Generated on 6 Dec 2019 for NAMD by  doxygen 1.6.1