00001 #include <map>
00002 #include <vector>
00003 #include <algorithm>
00004 #include "converse.h"
00005 #include "sockRoutines.h"
00006
00007 #define DEBUGP(x)
00026 #if 1
00027
00028 #include <stdlib.h>
00029 #include <stdio.h>
00030 #include <unistd.h>
00031
00032 #include <stdlib.h>
00033 #include <stdio.h>
00034
00035 #if CMK_BLUEGENEQ
00036 #include "TopoManager.h"
00037 #endif
00038
00039 #if CMK_BLUEGENEQ
00040 #include "spi/include/kernel/process.h"
00041 #endif
00042
00043 #if CMK_CRAYXE || CMK_CRAYXC
00044 extern "C" int getXTNodeID(int mpirank, int nummpiranks);
00045 #endif
00046
00047 #if defined(__APPLE__) && CMK_HAS_MULTIPROCESSING_H
00048 #include <Carbon/Carbon.h>
00049 #include <Multiprocessing.h>
00050 #endif
00051
00052 #if CMK_BIGSIM_CHARM
00053 #include "middle-blue.h"
00054 using namespace BGConverse;
00055 #endif
00056
/* Return the number of logical processors (PUs) usable on this host.
 * The FORCECPUCOUNT environment variable, when set to a parseable
 * integer, overrides all platform detection. Result is clamped to a
 * minimum of 1 when detection fails. */
extern "C" int CmiNumCores(void) {
  int count = 1;
#ifdef _WIN32
  struct _SYSTEM_INFO sysinfo;
#endif

  /* Explicit override from the environment (useful for testing). */
  const char *forced = getenv("FORCECPUCOUNT");
  if (forced != NULL) {
    int parsed;
    if (sscanf(forced, "%d", &parsed) == 1)
      return parsed;
    /* Unparseable value: ignore it and fall through to detection. */
  }

#if defined(__APPLE__) && CMK_HAS_MULTIPROCESSING_H
  count = MPProcessorsScheduled(); /* legacy Carbon Multiprocessing API */
#endif

#ifdef _WIN32
  GetSystemInfo(&sysinfo);
  count = sysinfo.dwNumberOfProcessors;
#endif

#ifdef _SC_NPROCESSORS_ONLN
  count = sysconf(_SC_NPROCESSORS_ONLN);
#ifdef _SC_NPROCESSORS_CONF
  /* Prefer the configured processor count when it exceeds the
   * currently-online count. */
  const int configured = sysconf(_SC_NPROCESSORS_CONF);
  if (configured > count) count = configured;
#endif
#elif defined(_SC_CRAY_NCPU)
  count = sysconf(_SC_CRAY_NCPU);
#elif defined(_SC_NPROC_ONLN)
  count = sysconf(_SC_NPROC_ONLN);
#endif
#if CMK_BLUEGENEQ
  count *= Kernel_ProcessCount(); /* scale by processes per node */
#endif

  if (count < 1) count = 1; /* detection failed or returned nonsense */

  return count;
}
00105
// Per-PE record contributed to the topology reduction. One of these is
// filled in by every PE in LrtsInitCpuTopo and merged by combineMessage.
struct _procInfo {
  skt_ip_t ip;      // host address used to group PEs onto physical nodes
  int pe;           // contributing PE's global index
  int ncores;       // CmiNumCores() as seen by that PE
  int rank;         // incremented on PE 0 as PEs are matched to this host
  int nodeID;       // assigned on PE 0: representative PE of this host
};
00113
// Converse message carrying n _procInfo records appended immediately
// after the struct; procs is re-anchored after each hop since pointers
// do not survive transmission.
typedef struct _hostnameMsg {
  char core[CmiMsgHeaderSizeBytes];   // converse message header
  int n;                              // number of records that follow
  _procInfo *procs;                   // points just past this struct
} hostnameMsg;
00119
// Broadcast message holding the final PE -> node-ID map. The int array
// of CmiNumPes() entries is appended after the struct; nodes is
// re-anchored on receipt.
typedef struct _nodeTopoMsg {
  char core[CmiMsgHeaderSizeBytes];   // converse message header
  int *nodes;                         // points just past this struct
} nodeTopoMsg;
00124
// Empty payload used for the second ("topology done") reduction; only
// the header matters.
typedef struct _topoDoneMsg {
  char core[CmiMsgHeaderSizeBytes];   // converse message header
} topoDoneMsg;
00128
00129
00130 class CpuTopology {
00131 public:
00132 static int *nodeIDs;
00133 static int numPes;
00134 static int numNodes;
00135 static std::vector<int> *bynodes;
00136 static int supported;
00137
00138 ~CpuTopology() {
00139 delete [] bynodes;
00140 }
00141
00142
00143 int numUniqNodes() {
00144 #if 0
00145 if (numNodes != 0) return numNodes;
00146 int n = 0;
00147 for (int i=0; i<CmiNumPes(); i++)
00148 if (nodeIDs[i] > n)
00149 n = nodeIDs[i];
00150 numNodes = n+1;
00151 return numNodes;
00152 #else
00153 if (numNodes > 0) return numNodes;
00154 std::vector<int> unodes(numPes);
00155 int i;
00156 for (i=0; i<numPes; i++) unodes[i] = nodeIDs[i];
00157 std::sort(unodes.begin(), unodes.end());
00158 int last = -1;
00159 std::map<int, int> nodemap;
00160 for (i=0; i<numPes; i++) {
00161 if (unodes[i] != last) {
00162 last=unodes[i];
00163 nodemap[unodes[i]] = numNodes;
00164 numNodes++;
00165 }
00166 }
00167 if (numNodes == 0) {
00168 numNodes = CmiNumNodes();
00169 numPes = CmiNumPes();
00170 }
00171 else {
00172
00173 for (i=0; i<numPes; i++) nodeIDs[i] = nodemap[nodeIDs[i]];
00174 CpuTopology::supported = 1;
00175 }
00176 return numNodes;
00177 #endif
00178 }
00179
00180 void sort() {
00181 int i;
00182 numUniqNodes();
00183 bynodes = new std::vector<int>[numNodes];
00184 if (supported) {
00185 for (i=0; i<numPes; i++){
00186 CmiAssert(nodeIDs[i] >=0 && nodeIDs[i] <= numNodes);
00187 bynodes[nodeIDs[i]].push_back(i);
00188 }
00189 }
00190 else {
00191 for (i=0;i<CmiNumPes();i++) bynodes[CmiNodeOf(i)].push_back(i);
00192 }
00193 }
00194
00195 void print() {
00196 int i;
00197 CmiPrintf("Charm++> Cpu topology info:\n");
00198 CmiPrintf("PE to node map: ");
00199 for (i=0; i<CmiNumPes(); i++)
00200 CmiPrintf("%d ", nodeIDs[i]);
00201 CmiPrintf("\n");
00202 CmiPrintf("Node to PE map:\n");
00203 for (i=0; i<numNodes; i++) {
00204 CmiPrintf("Chip #%d: ", i);
00205 for (int j=0; j<bynodes[i].size(); j++)
00206 CmiPrintf("%d ", bynodes[i][j]);
00207 CmiPrintf("\n");
00208 }
00209 }
00210
00211 };
00212
// Definitions of CpuTopology's static storage; shared by all instances.
int *CpuTopology::nodeIDs = NULL;             // installed by cpuTopoRecvHandler
int CpuTopology::numPes = 0;
int CpuTopology::numNodes = 0;
std::vector<int> *CpuTopology::bynodes = NULL;  // allocated by sort()
int CpuTopology::supported = 0;               // stays 0 until real data arrives
00218
// File-internal state for the topology-gathering protocol.
namespace CpuTopoDetails {

// PE->node broadcast message, built once on PE 0 by cpuTopoHandler.
static nodeTopoMsg *topomsg = NULL;
// Scratch table on PE 0 keyed by host address, used to deduplicate hosts.
static CmmTable hostTable;

// Converse handler indices, registered in LrtsInitCpuTopo().
CpvStaticDeclare(int, cpuTopoHandlerIdx);
CpvStaticDeclare(int, cpuTopoRecvHandlerIdx);
CpvStaticDeclare(int, topoDoneHandlerIdx);

static CpuTopology cpuTopo;        // the per-process topology singleton
static CmiNodeLock topoLock = 0;   // guards the done/topoDone counters below
static int done = 0;               // ranks that have received the PE->node map
static int topoDone = 0;           // ranks past the completion reduction
static int _noip = 0;              // set when no unique host address is available

}
00235
00236 using namespace CpuTopoDetails;
00237
00238 static void printTopology(int numNodes)
00239 {
00240
00241 const int ways = CmiNumCores();
00242 if (ways > 1)
00243 CmiPrintf("Charm++> Running on %d hosts (%d sockets x %d cores x %d PUs = %d-way SMP)\n",
00244 numNodes, CmiHwlocTopologyLocal.num_sockets,
00245 CmiHwlocTopologyLocal.num_cores / CmiHwlocTopologyLocal.num_sockets,
00246 CmiHwlocTopologyLocal.num_pus / CmiHwlocTopologyLocal.num_cores,
00247 ways);
00248 else
00249 CmiPrintf("Charm++> Running on %d hosts\n", numNodes);
00250
00251 #if !CMK_BLUEGENEQ
00252
00253 if (ways != CmiHwlocTopologyLocal.num_pus)
00254 CmiPrintf("Charm++> Warning: Internally-determined PU count does not match hwloc's result!\n");
00255 #endif
00256 }
00257
00258
/* Runs on PE 0 as the target of the hostname reduction. Receives the
 * merged hostnameMsg holding one _procInfo per PE, groups PEs by host
 * address, assigns each host a representative node ID (the first PE seen
 * on it), and broadcasts the resulting PE->node map to everyone. */
static void cpuTopoHandler(void *m)
{
  _procInfo *rec;
  hostnameMsg *msg = (hostnameMsg *)m;
  int tag, tag1, pe;

  // First (and only) invocation: build the dedup table and the outgoing
  // broadcast message, with the node array appended after the header.
  if (topomsg == NULL) {
    int i;
    hostTable = CmmNew();
    topomsg = (nodeTopoMsg *)CmiAlloc(sizeof(nodeTopoMsg)+CmiNumPes()*sizeof(int));
    CmiSetHandler((char *)topomsg, CpvAccess(cpuTopoRecvHandlerIdx));
    topomsg->nodes = (int *)((char*)topomsg + sizeof(nodeTopoMsg));
    for (i=0; i<CmiNumPes(); i++) topomsg->nodes[i] = -1;  // -1 = unassigned
  }
  CmiAssert(topomsg != NULL);

  // Re-anchor the records pointer; it is invalid after transmission.
  msg->procs = (_procInfo*)((char*)msg + sizeof(hostnameMsg));
  CmiAssert(msg->n == CmiNumPes());
  for (int i=0; i<msg->n; i++)
  {
    _procInfo *proc = msg->procs+i;

    // Match hosts by the leading int of the address. NOTE(review): only
    // the first sizeof(int) bytes of skt_ip_t participate in the match —
    // presumably sufficient for IPv4; confirm for wider address types.
    tag = *(int*)&proc->ip;
    pe = proc->pe;
    if ((rec = (_procInfo *)CmmProbe(hostTable, 1, &tag, &tag1)) != NULL) {
      // Host already seen: reuse its representative record.
    }
    else {
      // New host: this PE becomes its representative node ID. The
      // record stored in the table points into msg, which stays alive
      // until the table is drained below.
      proc->nodeID = pe;
      rec = proc;
      CmmPut(hostTable, 1, &tag, proc);
    }
    topomsg->nodes[pe] = rec->nodeID;
    rec->rank ++;  // count of PEs matched to this host
  }

  // One table entry per distinct host.
  printTopology(CmmEntries(hostTable));

  // Drain and release the table before freeing msg (entries alias msg).
  hostnameMsg *tmpm;
  tag = CmmWildCard;
  while ((tmpm = (hostnameMsg *)CmmGet(hostTable, 1, &tag, &tag1)));
  CmmFree(hostTable);
  CmiFree(msg);

  // Ship the completed PE->node map to every PE (frees topomsg).
  CmiSyncBroadcastAllAndFree(sizeof(nodeTopoMsg)+CmiNumPes()*sizeof(int), (char *)topomsg);
}
00309
00310
00311 static void topoDoneHandler(void *m) {
00312 CmiLock(topoLock);
00313 topoDone++;
00314 CmiUnlock(topoLock);
00315 }
00316
00317
/* Receives the PE->node map broadcast from PE 0 on every PE. The first
 * rank of each process to run adopts the embedded array as the shared
 * nodeIDs table — in that case the message must NOT be freed, because
 * nodeIDs aliases its payload for the life of the process. Later ranks
 * discard their redundant copy. */
static void cpuTopoRecvHandler(void *msg)
{
  nodeTopoMsg *m = (nodeTopoMsg *)msg;
  // Re-anchor the array pointer; it is invalid after transmission.
  m->nodes = (int *)((char*)m + sizeof(nodeTopoMsg));

  CmiLock(topoLock);
  if (cpuTopo.nodeIDs == NULL) {
    cpuTopo.nodeIDs = m->nodes;  // keep msg alive: nodeIDs points into it
    cpuTopo.sort();
  }
  else
    CmiFree(m);                  // another rank already installed the map
  done++;                        // wakes ranks polling in LrtsInitCpuTopo
  CmiUnlock(topoLock);
}
00335
00336
00337 static void * combineMessage(int *size, void *data, void **remote, int count)
00338 {
00339 int i, j;
00340 int nprocs = ((hostnameMsg *)data)->n;
00341 if (count == 0) return data;
00342 for (i=0; i<count; i++) nprocs += ((hostnameMsg *)remote[i])->n;
00343 *size = sizeof(hostnameMsg)+sizeof(_procInfo)*nprocs;
00344 hostnameMsg *msg = (hostnameMsg *)CmiAlloc(*size);
00345 msg->procs = (_procInfo*)((char*)msg + sizeof(hostnameMsg));
00346 msg->n = nprocs;
00347 CmiSetHandler((char *)msg, CpvAccess(cpuTopoHandlerIdx));
00348
00349 int n=0;
00350 hostnameMsg *m = (hostnameMsg*)data;
00351 m->procs = (_procInfo*)((char*)m + sizeof(hostnameMsg));
00352 for (j=0; j<m->n; j++)
00353 msg->procs[n++] = m->procs[j];
00354 for (i=0; i<count; i++) {
00355 m = (hostnameMsg*)remote[i];
00356 m->procs = (_procInfo*)((char*)m + sizeof(hostnameMsg));
00357 for (j=0; j<m->n; j++)
00358 msg->procs[n++] = m->procs[j];
00359 }
00360 return msg;
00361 }
00362
00363
00364 static void *emptyReduction(int *size, void *data, void **remote, int count)
00365 {
00366 if (CmiMyPe() != 0) {
00367 CmiLock(topoLock);
00368 topoDone++;
00369 CmiUnlock(topoLock);
00370 }
00371 *size = sizeof(topoDoneMsg);
00372 topoDoneMsg *msg = (topoDoneMsg *)CmiAlloc(sizeof(topoDoneMsg));
00373 CmiSetHandler((char *)msg, CpvAccess(topoDoneHandlerIdx));
00374 return msg;
00375 }
00376
00377
00378
00379 extern "C" int LrtsCpuTopoEnabled()
00380 {
00381 return CpuTopology::supported;
00382 }
00383
00384 extern "C" int LrtsPeOnSameNode(int pe1, int pe2)
00385 {
00386 int *nodeIDs = cpuTopo.nodeIDs;
00387 if (!cpuTopo.supported || nodeIDs == NULL) return CmiNodeOf(pe1) == CmiNodeOf(pe2);
00388 else return nodeIDs[pe1] == nodeIDs[pe2];
00389 }
00390
00391
00392 extern "C" int LrtsNumNodes()
00393 {
00394 if (!cpuTopo.supported) return CmiNumNodes();
00395 else return cpuTopo.numUniqNodes();
00396 }
00397
00398 extern "C" int LrtsNodeSize(int node)
00399 {
00400 return !cpuTopo.supported?CmiNodeSize(node):(int)cpuTopo.bynodes[node].size();
00401 }
00402
00403
00404 extern "C" void LrtsPeOnNode(int node, int **pelist, int *num)
00405 {
00406 *num = cpuTopo.bynodes[node].size();
00407 if (pelist!=NULL && *num>0) *pelist = cpuTopo.bynodes[node].data();
00408 }
00409
00410 extern "C" int LrtsRankOf(int pe)
00411 {
00412 if (!cpuTopo.supported) return CmiRankOf(pe);
00413 const std::vector<int> &v = cpuTopo.bynodes[cpuTopo.nodeIDs[pe]];
00414 int rank = 0;
00415 int npes = v.size();
00416 while (rank < npes && v[rank] < pe) rank++;
00417 CmiAssert(v[rank] == pe);
00418 return rank;
00419 }
00420
00421 extern "C" int LrtsNodeOf(int pe)
00422 {
00423 if (!cpuTopo.supported) return CmiNodeOf(pe);
00424 return cpuTopo.nodeIDs[pe];
00425 }
00426
00427
00428 extern "C" int LrtsNodeFirst(int node)
00429 {
00430 if (!cpuTopo.supported) return CmiNodeFirst(node);
00431 return cpuTopo.bynodes[node][0];
00432 }
00433
00434
/* Gather CPU topology information across all PEs. Platform-specific
 * fast paths (BigSim, Blue Gene/Q, Cray) compute the PE->node map
 * directly; the generic path runs a two-phase protocol:
 *   1. every PE contributes a _procInfo via a CmiReduce to PE 0
 *      (combineMessage merges, cpuTopoHandler builds + broadcasts the
 *      PE->node map, cpuTopoRecvHandler installs it per process);
 *   2. an empty completion reduction confirms all PEs are done.
 * Called on every PE (and comm thread) during startup. */
extern "C" void LrtsInitCpuTopo(char **argv)
{
  static skt_ip_t myip;  // static: read by all ranks after rank 0 sets it
  hostnameMsg *msg;
  double startT;

  int obtain_flag = 1;   // gather topology unless told otherwise
  int show_flag = 0;     // dump the maps at the end

  if (CmiMyRank() ==0) {
    topoLock = CmiCreateLock();
  }

#if __FAULT__
  // Fault tolerance builds skip topology detection by default.
  obtain_flag = 0;
#endif
  // All three flags are always parsed so they are stripped from argv.
  if(CmiGetArgFlagDesc(argv,"+obtain_cpu_topology",
        "obtain cpu topology info"))
    obtain_flag = 1;
  if (CmiGetArgFlagDesc(argv,"+skip_cpu_topology",
        "skip the processof getting cpu topology info"))
    obtain_flag = 0;
  if(CmiGetArgFlagDesc(argv,"+show_cpu_topology",
        "Show cpu topology info"))
    show_flag = 1;

#if CMK_BIGSIM_CHARM
  if (BgNodeRank() == 0)
#endif
  {
    // Register the three converse handlers used by the protocol.
    CpvInitialize(int, cpuTopoHandlerIdx);
    CpvInitialize(int, cpuTopoRecvHandlerIdx);
    CpvInitialize(int, topoDoneHandlerIdx);
    CpvAccess(cpuTopoHandlerIdx) =
      CmiRegisterHandler((CmiHandler)cpuTopoHandler);
    CpvAccess(cpuTopoRecvHandlerIdx) =
      CmiRegisterHandler((CmiHandler)cpuTopoRecvHandler);
    CpvAccess(topoDoneHandlerIdx) =
      CmiRegisterHandler((CmiHandler)topoDoneHandler);
  }
  if (!obtain_flag) {
    // Detection disabled: fall back to the logical layout and bail out.
    if (CmiMyRank() == 0) cpuTopo.sort();
    CmiNodeAllBarrier();
    CcdRaiseCondition(CcdTOPOLOGY_AVAIL);
    return;
  }

  if (CmiMyPe() == 0) {
#if CMK_BIGSIM_CHARM
    if (BgNodeRank() == 0)
#endif
      startT = CmiWallTimer();  // only read on PE 0 below
  }

#if CMK_BIGSIM_CHARM
  // BigSim emulation: the node layout is known analytically.
  if (BgNodeRank() == 0)
  {
    int numPes = cpuTopo.numPes = CkNumPes();
    cpuTopo.nodeIDs = new int[numPes];
    CpuTopology::supported = 1;
    int wth = BgGetNumWorkThread();
    for (int i=0; i<numPes; i++) {
      int nid = i / wth;  // wth worker threads per emulated node
      cpuTopo.nodeIDs[i] = nid;
    }
    cpuTopo.sort();
  }
  return;
#else

#if CMK_USE_GM
  CmiBarrier();
#endif

  // (dead code retained from an older hostname-based implementation)
#if 0
  if (gethostname(hostname, 999)!=0) {
    strcpy(hostname, "");
  }
#endif
#if CMK_BLUEGENEQ
  // Blue Gene/Q: derive node IDs from torus coordinates (t=0 collapses
  // all hardware threads of a node onto one rank).
  if (CmiMyRank() == 0) {
    TopoManager tmgr;

    int numPes = cpuTopo.numPes = CmiNumPes();
    cpuTopo.nodeIDs = new int[numPes];
    CpuTopology::supported = 1;

    int a, b, c, d, e, t, nid;
    for(int i=0; i<numPes; i++) {
      tmgr.rankToCoordinates(i, a, b, c, d, e, t);
      nid = tmgr.coordinatesToRank(a, b, c, d, e, 0);
      cpuTopo.nodeIDs[i] = nid;
    }
    cpuTopo.sort();
    if (CmiMyPe()==0) printTopology(cpuTopo.numNodes);
  }
  CmiNodeAllBarrier();
#elif CMK_CRAYXE || CMK_CRAYXC
  // Cray XE/XC: ask the system for each rank's node, then renumber the
  // IDs densely (assumes ranks on the same node are contiguous).
  if(CmiMyRank() == 0) {
    int numPes = cpuTopo.numPes = CmiNumPes();
    int numNodes = CmiNumNodes();
    cpuTopo.nodeIDs = new int[numPes];
    CpuTopology::supported = 1;

    int nid;
    for(int i=0; i<numPes; i++) {
      nid = getXTNodeID(CmiNodeOf(i), numNodes);
      cpuTopo.nodeIDs[i] = nid;
    }
    int prev = -1;
    nid = -1;

    // Compact raw node IDs into consecutive integers starting at 0.
    for(int i=0; i<numPes; i++) {
      if(cpuTopo.nodeIDs[i] != prev) {
        prev = cpuTopo.nodeIDs[i];
        cpuTopo.nodeIDs[i] = ++nid;
      }
      else
        cpuTopo.nodeIDs[i] = nid;
    }
    cpuTopo.sort();
    if (CmiMyPe()==0) printTopology(cpuTopo.numNodes);
  }
  CmiNodeAllBarrier();

#else
  // Generic path: reduction-based host discovery.

  bool topoInProgress = true;

  if (CmiMyPe() >= CmiNumPes()) {
    // Communication thread: wait at the barrier, then keep the network
    // progressing until the worker ranks finish the protocol.
    CmiNodeAllBarrier();
#if CMK_MACHINE_PROGRESS_DEFINED
    // The second reduction only runs with more than one node.
    bool waitForSecondReduction = (CmiNumNodes() > 1);
    while (topoInProgress) {
      CmiNetworkProgress();
      CmiLock(topoLock);
      if (waitForSecondReduction) topoInProgress = topoDone < CmiMyNodeSize();
      else topoInProgress = done < CmiMyNodeSize();
      CmiUnlock(topoLock);
    }
#endif
    return;
  }

  // Rank 0 of each process determines the host address all its ranks share.
  if (CmiMyRank() == 0)
  {
#if CMK_HAS_GETHOSTNAME && !CMK_BLUEGENEQ
    myip = skt_my_ip();  // the address distinguishes physical hosts

#elif CMK_BPROC
    myip = skt_innode_my_ip();
#else
    if (!CmiMyPe())
      CmiPrintf("CmiInitCPUTopology Warning: Can not get unique name for the compute nodes. \n");
    _noip = 1;  // no way to identify hosts: abandon detection
#endif
    cpuTopo.numPes = CmiNumPes();
  }

  CmiNodeAllBarrier();  // ensure myip/_noip are set before any rank reads them
  if (_noip) return;

  // Phase 1: contribute this PE's record to the reduction rooted at PE 0.
  msg = (hostnameMsg *)CmiAlloc(sizeof(hostnameMsg)+sizeof(_procInfo));
  msg->n = 1;
  msg->procs = (_procInfo*)((char*)msg + sizeof(hostnameMsg));
  CmiSetHandler((char *)msg, CpvAccess(cpuTopoHandlerIdx));
  msg->procs[0].pe = CmiMyPe();
  msg->procs[0].ip = myip;
  msg->procs[0].ncores = CmiNumCores();
  msg->procs[0].rank = 0;
  msg->procs[0].nodeID = 0;
  CmiReduce(msg, sizeof(hostnameMsg)+sizeof(_procInfo), combineMessage);

  // Wait until every rank of this process has received the PE->node map
  // (done is bumped by cpuTopoRecvHandler).
  while (topoInProgress) {
    CsdSchedulePoll();
    CmiLock(topoLock);
    topoInProgress = done < CmiMyNodeSize();
    CmiUnlock(topoLock);
  }

  // Phase 2 (multi-node only): completion reduction so spanning-tree
  // parents know their children are done before startup proceeds.
  if (CmiNumNodes() > 1) {
    topoDoneMsg *msg2 = (topoDoneMsg *)CmiAlloc(sizeof(topoDoneMsg));
    CmiSetHandler((char *)msg2, CpvAccess(topoDoneHandlerIdx));
    CmiReduce(msg2, sizeof(topoDoneMsg), emptyReduction);
    if ((CmiMyPe() == 0) || (CmiNumSpanTreeChildren(CmiMyPe()) > 0)) {
      // Roots and interior nodes must poll until the reduction reaches them.
      topoInProgress = true;
      while (topoInProgress) {
        CsdSchedulePoll();
        CmiLock(topoLock);
        topoInProgress = topoDone < CmiMyNodeSize();
        CmiUnlock(topoLock);
      }
    } else {
      // Leaves are done as soon as they have contributed.
      CmiLock(topoLock);
      topoDone++;
      CmiUnlock(topoLock);
    }
  }

  if (CmiMyPe() == 0) {
#if CMK_BIGSIM_CHARM
    if (BgNodeRank() == 0)
#endif
      CmiPrintf("Charm++> cpu topology info is gathered in %.3f seconds.\n", CmiWallTimer()-startT);
  }
#endif

#endif

  // Topology (real or fallback) is now available to interested modules.
  CcdRaiseCondition(CcdTOPOLOGY_AVAIL);
  if (CmiMyPe() == 0 && show_flag) cpuTopo.print();
}
00656
00657 #else
00658
00659 extern "C" void LrtsInitCpuTopo(char **argv)
00660 {
00661
00662 int obtain_flag = CmiGetArgFlagDesc(argv,"+obtain_cpu_topology",
00663 "obtain cpu topology info");
00664 CmiGetArgFlagDesc(argv,"+skip_cpu_topology",
00665 "skip the processof getting cpu topology info");
00666 CmiGetArgFlagDesc(argv,"+show_cpu_topology",
00667 "Show cpu topology info");
00668 }
00669
00670 #endif
00671
/* Public C API: thin forwarding wrappers over the Lrts* layer above. */

// Nonzero when physical-node topology information is available.
extern "C" int CmiCpuTopologyEnabled()
{
  return LrtsCpuTopoEnabled();
}
// True when both PEs reside on the same physical host.
extern "C" int CmiPeOnSamePhysicalNode(int pe1, int pe2)
{
  return LrtsPeOnSameNode(pe1, pe2);
}
// Count of distinct physical hosts in the run.
extern "C" int CmiNumPhysicalNodes()
{
  return LrtsNumNodes();
}
// Number of PEs on the given physical node.
extern "C" int CmiNumPesOnPhysicalNode(int node)
{
  return LrtsNodeSize(node);
}
// Retrieve the PE list of a physical node (list is internally owned).
extern "C" void CmiGetPesOnPhysicalNode(int node, int **pelist, int *num)
{
  LrtsPeOnNode(node, pelist, num);
}
// Index of a PE within its physical node's PE list.
extern "C" int CmiPhysicalRank(int pe)
{
  return LrtsRankOf(pe);
}
// Physical node a PE resides on.
extern "C" int CmiPhysicalNodeID(int pe)
{
  return LrtsNodeOf(pe);
}
// Lowest-numbered PE on a physical node.
extern "C" int CmiGetFirstPeOnPhysicalNode(int node)
{
  return LrtsNodeFirst(node);
}
// Entry point called during Converse startup.
extern "C" void CmiInitCPUTopology(char **argv)
{
  LrtsInitCpuTopo(argv);
}
00708