Dataset Viewer
Columns:
ID: int64, values 0 to 88
Nature Language: string, lengths 33 to 344 characters
CPP: string, lengths 85 to 3.03k characters
Fortran: string, lengths 81 to 7.76k characters
0
openFile: Attempts to open the file passed as an argument.
int openFile(char *fileName){\n\tprintf(\"Attempting to open %s ....\",fileName);\n\tinputFile = fopen(fileName, \"r\");\n\t/* Check that file opened successfully */\n\tif (inputFile == NULL){\n\t\tprintf(\"ERROR.\\n\");\n\t}\n\telse{\n\t\tprintf(\"Success.\\n\");\n\t}\n\treturn 0;\n}
SUBROUTINE openFile()\n\tinteger :: argStatus\n\tcharacter (len = MAXSTRING) :: fileName\n\n\t!Read input file name from command line\n\t!CALL getarg(1,fileName)\n\tCALL GET_COMMAND_ARGUMENT(1,fileName,STATUS=argStatus)\n\tIF(argStatus > 0) THEN\n\t print *, 'ERROR Reading input file from command line.'\n\t print *, 'Usa...
1
closeFile:Closes the input file.
int closeFile(){\n\t/* close the input file */\n\tfclose(inputFile);\n\treturn 0;\n}
SUBROUTINE closeFile()\n\n\t!Close file\n\tCLOSE(10)\n\nEND SUBROUTINE closeFile
2
setupBenchmarkList:Subroutine to setup the benchmarkList array with the list of all possible benchmarks.
int setupBenchmarkList(){\n\t/* Pingpong benchmarks */\n\tstrcpy (benchmarkList[0], "masteronlypingpong");\n\tstrcpy (benchmarkList[1], "funnelledpingpong");\n\tstrcpy (benchmarkList[2], "multiplepingpong");\n\t/* Pingping benchmarks */\n\tstrcpy (benchmarkList[3], "masteronlypingping");\n\tstrcpy (benchmarkList[4], "f...
SUBROUTINE setupBenchmarkList()\n\n\t!Pingpong benchmarks\n\tbenchmarkList(1) = 'masteronlypingpong'\n\tbenchmarkList(2) = 'funnelledpingpong'\n\tbenchmarkList(3) = 'multiplepingpong'\n\t!Pingping benchmarks\n\tbenchmarkList(4) = 'masteronlypingping'\n\tbenchmarkList(5) = 'funnelledpingping'\n\tbenchmarkList(6) = 'mult...
3
readBenchmarkParams:Initialises the benchmark parameters. Reads the minimum and maximum data sizes for the benchmarks from the input file (unit 10).
int readBenchmarkParams(){\n\t/* Rank 0 reads parameters from input file */\n\tif (myMPIRank == 0){\n\t\tprintf ("Reading parameters from input file....\\n");\n\t\t/* read minimum data size from input file */\n\t\tfscanf(inputFile, "%d", &minDataSize);\n\t\t/* read maximum data size from input file */\n\t\tfscanf(input...
SUBROUTINE readBenchmarkParams()\n\n\t!Rank 0 reads parameters from input file\n\tIF (myMPIRank == 0) THEN\n\t write (*,*) 'Reading parameters from input file....'\n\t !read minimum data size from input file\n\t read(10,*) minDataSize \n\t !read maximum data size from input file\n\t read(10,*) maxDataSize\n\t !read target ...
4
findBenchmarkNumber:Finds the ID of the next benchmark which will be executed. Master MPI process reads next line from input file. It then compares it to the benchmark list to find the ID and broadcasts this to the other MPI processes. The function sets the benchmarkNumber variable and also returns the benchmarkNumber....
int findBenchmarkNumber(){\n\tchar benchmarkName[MAXSTRING];\n\tint rankInA, rankInB;\n\tint i;\n\n\t/* Master MPI process reads next line from file */\n\tif (myMPIRank == 0){\n\t\t/* set benchmarkNumber to ERROR before read to allow error\n\t\tcheck */\n\t\tbenchmarkNumber = ERROR;\n\n\t\t/* read next benchmark from f...
SUBROUTINE findBenchmarkNumber()\n\tcharacter (len = MAXSTRING) :: benchmarkName\n\tinteger :: rankInA, rankInB\n\tinteger :: i\n\n\t!Master MPI process reads next line from file\n\tIF (myMPIRank == 0) THEN\n\t\t!set benchmarkNumber to ERROR before read\n\t\t!to allow error check\n\t\tbenchmarkNumber = ERROR\n\t\t\n\t\...
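Both findBenchmarkNumber listings above are truncated. Below is a minimal C sketch of the read-compare-broadcast pattern the description names; the suite's globals (inputFile, benchmarkList, comm, benchmarkNumber) are declared as externs and NUM_BENCHMARKS is an assumed list size.

```c
#include <stdio.h>
#include <string.h>
#include <mpi.h>

#define MAXSTRING 128
#define NUM_BENCHMARKS 12   /* assumed size of benchmarkList */
#define ERROR -1

extern FILE *inputFile;
extern int myMPIRank;
extern MPI_Comm comm;
extern char benchmarkList[NUM_BENCHMARKS][MAXSTRING];
extern int benchmarkNumber;

int findBenchmarkNumberSketch(void){
    char benchmarkName[MAXSTRING];
    int i;

    if (myMPIRank == 0){
        /* set benchmarkNumber to ERROR before read to allow error check */
        benchmarkNumber = ERROR;
        if (fscanf(inputFile, "%s", benchmarkName) == 1){
            /* compare against the known benchmark names */
            for (i = 0; i < NUM_BENCHMARKS; i++){
                if (strcmp(benchmarkName, benchmarkList[i]) == 0){
                    benchmarkNumber = i;
                    break;
                }
            }
        }
    }
    /* master broadcasts the ID to the other MPI processes */
    MPI_Bcast(&benchmarkNumber, 1, MPI_INT, 0, comm);
    return benchmarkNumber;
}
```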
5
convertToLowerCase:Takes a string as an argument and converts all uppercase characters to lowercase using their ASCII values.
int convertToLowercase(char *convertString){\n\tint i;\n\tint len;\n\n\tlen = strlen(convertString);\n\n\tfor (i=0; i<len; i++){\n\t\tconvertString[i] = tolower(convertString[i]);\n\t}\n\n\treturn 0;\n}
SUBROUTINE convertToLowerCase(string)\n\tcharacter (len = *), intent(inout) :: string\n\tinteger :: i, length\n\n\t!Find length of string.\n\tlength = LEN(string)\n\t\n\t!Loop through each character of string.\n\tDO i = 1, length\n\t\t!If character between A and Z...\n\t\tIF((string(i:i) >= 'A') .AND. (string(i:i)) <= ...
6
repTimeCheck:Checks if the time for the benchmark reached the target time. Changes the number of repetitions for the next data size based on the difference between the time taken and the target time.
int repTimeCheck(double time, int numReps){\n\tint repCheck;\n\n\tif (time < targetTime){\n\t\trepsToDo = 2 * numReps;\n\t\trepCheck = FALSE;\n\t}\n\telse if (time > (2 * targetTime)){\n\t\trepsToDo = max(numReps/2,1);\n\t\trepCheck = TRUE;\n\t}\n\telse {\n\t\trepCheck = TRUE;\n\t}\n\n\treturn repCheck;\n}
FUNCTION repTimeCheck(time, numReps)\n\tDOUBLE PRECISION, intent(in) :: time\n\tinteger, intent(in) :: numReps\n\tlogical :: repTimeCheck\n\n\tIF (time < targetTime) THEN\n\t\t!double repsToDo and repeat benchmark\n\t\trepsToDo = 2*numReps\n\t\trepTimeCheck = .false.\n\tELSE IF (time > (2*targetTime)) THEN\n\t\t!finish b...
7
alltoall:Driver routine for the alltoall benchmark.
int alltoall(){\n    int dataSizeIter;\n    int bufferSize;\n    repsToDo = defaultReps;\n    dataSizeIter = minDataSize;\n    while (dataSizeIter <= maxDataSize){\n        bufferSize = dataSizeIter * numThreads * numMPIprocs * numThreads;\n        allocateAlltoallData(bufferSize);\n        alltoallKernel(warmUpIters, dataSizeIter);\n        testAlltoall(dataSizeIter);\n        benchComp...
SUBROUTINE alltoall()\n\tinteger :: dataSizeIter\n\tinteger :: bufferSize\n\n\t!Initialise repsToDo to defaultReps\n\trepsToDo = defaultReps\n\n\t!Start loop over data sizes\n\tdataSizeIter = minDataSize !initialise dataSizeIter\n\tDO WHILE (dataSizeIter <= maxDataSize)\n\t\t!Calculate buffer size and allocate space fo...
8
alltoallKernel:Implements the all to all benchmark. Each thread sends/receives dataSize items to/from every other process.
int alltoallKernel(int totalReps, int dataSize){\n    int repIter, i, j;\n    int dataForEachProc, numsToWrite;\n    int blockNum, startOffset;\n    numsToWrite = numThreads * dataSize;\n    dataForEachProc = numThreads * numThreads * dataSize;\n    for (repIter=0; repIter<totalReps; repIter++){\n        #pragma omp parallel default(none) private(blockNum,i...
SUBROUTINE alltoallKernel(totalReps,dataSize)\n\tinteger, intent(in) :: totalReps, dataSize\n\tinteger :: repIter, i, j\n\tinteger :: dataForEachProc, numsToWrite\n\tinteger :: blockNum, startOffset\n\n\t!Calculate how much data each thread sends to each process\n\tnumsToWrite = numThreads * dataSize\n\t!Calculate tota...
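Both alltoall kernels above cut off before the communication call. A hedged C sketch of the described pattern follows: threads fill the send buffer, a single MPI_Alltoall exchanges dataForEachProc elements per process pair, and threads copy their share back out. The buffer names and globals are assumptions taken from the neighbouring rows.

```c
#include <mpi.h>
#include <omp.h>

extern int *alltoallSendBuf, *alltoallRecvBuf, *alltoallFinalBuf;
extern int *globalIDarray;
extern int numThreads, numMPIprocs;
extern MPI_Comm comm;

void alltoallKernelSketch(int totalReps, int dataSize){
    int dataForEachProc = numThreads * numThreads * dataSize;
    int bufferSize = dataForEachProc * numMPIprocs;

    for (int repIter = 0; repIter < totalReps; repIter++){
        /* threads fill the send buffer with their global IDs */
        #pragma omp parallel for
        for (int i = 0; i < bufferSize; i++){
            alltoallSendBuf[i] = globalIDarray[omp_get_thread_num()];
        }
        /* exchange dataForEachProc ints with every other process */
        MPI_Alltoall(alltoallSendBuf, dataForEachProc, MPI_INT,
                     alltoallRecvBuf, dataForEachProc, MPI_INT, comm);
        /* threads copy their share of the received data out */
        #pragma omp parallel for
        for (int i = 0; i < bufferSize; i++){
            alltoallFinalBuf[i] = alltoallRecvBuf[i];
        }
    }
}
```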
9
allocateAlltoallData: Allocates memory for the main data arrays used in the alltoall benchmark.
int allocateAlltoallData(int bufferSize){\n\talltoallSendBuf = (int *) malloc(bufferSize * sizeof(int));\n\talltoallRecvBuf = (int *) malloc(bufferSize * sizeof(int));\n\talltoallFinalBuf = (int *) malloc(bufferSize * sizeof(int));\n\n\treturn 0;\n}
SUBROUTINE allocateData(bufferSize)\n\tinteger, intent(in) :: bufferSize\n\n\tallocate(alltoallSendBuf(bufferSize))\n\tallocate(alltoallRecvBuf(bufferSize))\n\tallocate(alltoallFinalBuf(bufferSize))\n\nEND SUBROUTINE allocateData
10
freeAlltoallData:Free memory of the main data arrays.
int freeAlltoallData() {\n  free(alltoallSendBuf);\n  free(alltoallRecvBuf);\n  free(alltoallFinalBuf);\n  return 0;\n}
SUBROUTINE freeData()\n\n\tdeallocate(alltoallSendBuf)\n\tdeallocate(alltoallRecvBuf)\n\tdeallocate(alltoallFinalBuf)\n\nEND SUBROUTINE freeData
11
testAlltoall:Verifies that the all to all completed successfully.
int testAlltoall(int dataSize){\n int sizeofBuffer, i, j;\n int dataForEachThread, startElem;\n int testFlag, reduceFlag;\n int *testBuf;\n\n testFlag = TRUE;\n sizeofBuffer = dataSize * numThreads * numMPIprocs * numThreads;\n testBuf = (int *) malloc(sizeofBuffer * sizeof(int));\n\n dataForEac...
SUBROUTINE testAlltoall(dataSize)\n\n\tinteger, intent(in) :: dataSize\n\tinteger :: sizeofBuffer, i, j\n\tinteger :: dataForEachThread, startElem\n\tlogical :: testFlag, reduceFlag\n\n\t!Set testFlag to true\n\ttestFlag = .true.\n\n\t!calculate the size of buffer on each process and allocate\n\tsizeofBuffer = dataSize...
12
barrierDriver:Driver subroutine for the barrier benchmark.
int barrierDriver(){ \n repsToDo = defaultReps; \n barrierKernel(warmUpIters); \n benchComplete = FALSE; \n \n while (benchComplete != TRUE){ \n MPI_Barrier(comm); \n startTime = MPI_Wtime(); \n barrierKernel(repsToDo); \n MPI_Barrier(comm); \n finishTime = MPI_Wtime();...
SUBROUTINE barrierDriver()\n\n\t!initialise repsToDo to defaultReps\n\trepsToDo = defaultReps\n\n\t!Perform warm-up for benchmark\n\tCALL barrierKernel(warmUpIters)\n\n\t!Initialise the benchmark \n\tbenchComplete = .false.\n\t!Execute benchmark until target time is reached\n\tDO WHILE (benchComplete .NEQV. .true.)\n\t...
13
barrierKernel:Main kernel for the barrier benchmark. First, threads under each process synchronise with an OMP BARRIER. Then an MPI barrier synchronises the MPI processes. The MPI barrier is called within an OpenMP master directive.
int barrierKernel(int totalReps){ \n int repIter; \n\n #pragma omp parallel default(none) \\ \n private(repIter) \\ \n shared(totalReps,comm) \n { \n for (repIter=0; repIter<totalReps; repIter++){ \n #pragma omp barrier \n\n #pragma omp master \n { \n ...
SUBROUTINE barrierKernel(totalReps)\n\tinteger, intent(in) :: totalReps\n\tinteger :: repIter\n\n\t!Open the parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(repIter), &\n!$OMP SHARED(totalReps,comm,ierr) \n\n\tDO repIter = 1, totalReps\n\n\t\t!Threads synchronise with an OpenMP barrier\n!$OMP BARRIER\n\...
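Both kernels are cut off at the MPI call; a minimal C sketch of the complete loop body described above, assuming only the suite's comm global:

```c
#include <mpi.h>

extern MPI_Comm comm;

void barrierKernelSketch(int totalReps){
    int repIter;
    #pragma omp parallel default(none) private(repIter) shared(totalReps, comm)
    {
        for (repIter = 0; repIter < totalReps; repIter++){
            /* threads synchronise first... */
            #pragma omp barrier
            /* ...then the master thread synchronises the MPI processes */
            #pragma omp master
            {
                MPI_Barrier(comm);
            }
        }
    }
}
```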
14
broadcast:Driver subroutine for the broadcast benchmark.
int broadcast() {\n int dataSizeIter, sizeofFinalBuf;\n repsToDo = defaultReps;\n dataSizeIter = minDataSize;\n \n while (dataSizeIter <= maxDataSize) {\n allocateBroadcastData(dataSizeIter);\n broadcastKernel(warmUpIters,dataSizeIter);\n sizeofFinalBuf = dataSizeIter * numThreads;\n...
SUBROUTINE broadcast()\n\tinteger :: dataSizeIter\n\tinteger :: sizeofFinalBuf !needed for test\n\n\t!initialise repsToDo to defaultReps\n\trepsToDo = defaultReps\n\n\t!Start loop over data sizes\n\tdataSizeIter = minDataSize \n\tDO WHILE (dataSizeIter <= maxDataSize) \n\t\t!allocate space for main data arrays\n\t\tCAL...
15
broadcastKernel:The broadcast benchmark. At the start one process owns the data. After, all processes and threads have a copy of the data.
int broadcastKernel(int totalReps, int dataSize){\n int repIter, i;\n int startPos;\n\n for (repIter=0; repIter<totalReps; repIter++){\n if (myMPIRank == BROADCASTROOT){\n for (i=0; i<dataSize; i++){\n broadcastBuf[i] = BROADCASTNUM;\n }\n }\n\n MPI_Bca...
SUBROUTINE broadcastKernel(totalReps, dataSize)\n\tinteger, intent(in) :: totalReps, dataSize\n\tinteger :: repIter, i\n\t!Set source of broadcast\n\tinteger, parameter :: BROADCASTROOT = 0\n\t!Start position in finalBroadcastBuf of each thread.\n\tinteger :: startPos \n\n\tDO repIter = 1, totalReps\n\n\t\t!Master MPI ...
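A short C sketch of the full repetition described here: the root fills the buffer, MPI_Bcast distributes it, and each thread copies the data to its own offset in finalBroadcastBuf. BROADCASTNUM's value and the extern globals are assumptions.

```c
#include <mpi.h>
#include <omp.h>

#define BROADCASTROOT 0
#define BROADCASTNUM  1   /* assumed fill value */

extern int *broadcastBuf, *finalBroadcastBuf;
extern int myMPIRank;
extern MPI_Comm comm;

void broadcastKernelSketch(int totalReps, int dataSize){
    for (int repIter = 0; repIter < totalReps; repIter++){
        /* root fills the broadcast buffer */
        if (myMPIRank == BROADCASTROOT){
            for (int i = 0; i < dataSize; i++){
                broadcastBuf[i] = BROADCASTNUM;
            }
        }
        /* distribute the buffer to every process */
        MPI_Bcast(broadcastBuf, dataSize, MPI_INT, BROADCASTROOT, comm);
        /* each thread copies the data to its own offset */
        #pragma omp parallel
        {
            int startPos = omp_get_thread_num() * dataSize;
            for (int i = 0; i < dataSize; i++){
                finalBroadcastBuf[startPos + i] = broadcastBuf[i];
            }
        }
    }
}
```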
16
allocateBroadcastData:Allocate memory for the main data arrays in the broadcast operation.
int allocateBroadcastData(int bufferSize){\n    broadcastBuf = (int *)malloc(bufferSize * sizeof(int));\n    finalBroadcastBuf = (int *)malloc((bufferSize*numThreads)*sizeof(int));\n    return 0;\n}
SUBROUTINE allocateData(bufferSize)\n\tinteger, intent(in) :: bufferSize\n\n\tallocate(broadcastBuf(bufferSize))\n\t!finalBroadcastBuf is of size dataSize*numThreads\n\tallocate(finalBroadcastBuf(bufferSize*numThreads))\n\nEND SUBROUTINE allocateData
17
freeBroadcastData:Free memory of main data arrays.
int freeBroadcastData(){\n    free(broadcastBuf);\n    free(finalBroadcastBuf);\n    return 0;\n}
SUBROUTINE freeData()\n\n\tdeallocate(broadcastBuf)\n\tdeallocate(finalBroadcastBuf)\n\nEND SUBROUTINE freeData
18
testBroadcast:Verifies that the broadcast benchmark worked correctly.
int testBroadcast(int bufferSize){\n int i, testFlag, reduceFlag;\n testFlag = TRUE;\n\n for (i=0; i<bufferSize; i++){\n if (finalBroadcastBuf[i] != BROADCASTNUM){\n testFlag = FALSE;\n }\n }\n\n MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);\n\n if (myMPIR...
SUBROUTINE testBroadcast(bufferSize)\n\n\tinteger, intent(in) :: bufferSize\n\tinteger :: i\n\tlogical :: testFlag, reduceFlag\n\n\t!Initialise testFlag to true\n\ttestFlag = .true.\n\n\t!Compare each element of finalBroadcastBuf with BROADCASTNUM\n\tDO i = 1, bufferSize\n\t\tIF (finalBroadcastBuf(i) /= BROADCASTNUM) T...
19
reduction:Driver subroutine for the reduce and allReduce benchmarks.
int reduction(int benchmarkType){\n int dataSizeIter, sizeofBuf;\n repsToDo = defaultReps;\n dataSizeIter = minDataSize;\n\n while (dataSizeIter <= maxDataSize){\n allocateReduceData(dataSizeIter);\n\n if (benchmarkType == REDUCE){\n reduceKernel(warmUpIters, dataSizeIter);\n\n ...
SUBROUTINE reduction(benchmarkType)\ninteger, intent(in) :: benchmarkType\ninteger :: dataSizeIter\ninteger :: sizeofBuf !for allReduce operation\n!initialise repsToDo to defaultReps\nrepsToDo = defaultReps\n!Start loop over data sizes\ndataSizeIter = minDataSize !initialise dataSizeIter\nDO WHILE (dataSizeIter <= maxD...
20
reduceKernel:Implements the reduce mixed mode benchmark. Each thread under every MPI process combines its local buffer. This is then sent to the master MPI process to get the overall reduce value.
int reduceKernel(int totalReps, int dataSize){\n    int repIter, i, j;\n\n    for (repIter=0; repIter<totalReps; repIter++){\n        #pragma omp parallel default(none) \\\n            private(i,j) \\\n            shared(tempBuf,globalIDarray,dataSize,numThreads) \\\n            shared(localReduceBuf)\n        {\n            #prag...
SUBROUTINE reduceKernel(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter,i\n!Declare array which each thread reduces into\ninteger, dimension(dataSize) :: localReduceBuf\n\nDO repIter = 1, totalReps !loop for totalReps\n\n !initialise all reduce arrays to ensure correct results\n loc...
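A hedged C sketch of one reduce repetition as described: threads combine their global IDs into a per-process buffer, then MPI_Reduce combines across processes on the master. The atomic update is one plausible way to let all threads add into the shared buffer; the globals are assumptions.

```c
#include <string.h>
#include <mpi.h>
#include <omp.h>

extern int *localReduceBuf, *globalReduceBuf;
extern int *globalIDarray;
extern MPI_Comm comm;

void reduceKernelSketch(int totalReps, int dataSize){
    for (int repIter = 0; repIter < totalReps; repIter++){
        /* initialise the reduce array to ensure correct results */
        memset(localReduceBuf, 0, dataSize * sizeof(int));
        /* every thread adds its global ID into the shared local buffer */
        #pragma omp parallel
        {
            int myID = globalIDarray[omp_get_thread_num()];
            for (int i = 0; i < dataSize; i++){
                #pragma omp atomic
                localReduceBuf[i] += myID;
            }
        }
        /* combine per-process results on the master process */
        MPI_Reduce(localReduceBuf, globalReduceBuf, dataSize,
                   MPI_INT, MPI_SUM, 0, comm);
    }
}
```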
21
allReduce:Implements the allreduce mixed mode benchmark. Each thread under every MPI process combines its local buffer. All MPI processes then combine these values into the overall reduction value, which is available at each process.
int allReduceKernel(int totalReps, int dataSize){\n int repIter, i, j;\n int startPos;\n\n for (repIter=0; repIter<totalReps; repIter++){\n\n #pragma omp parallel default(none) \\\n private(i,j) \\\n shared(tempBuf,globalIDarray,dataSize,numThreads) \\\n shared(localRedu...
SUBROUTINE allReduceKernel(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter,i\ninteger :: startPos\n!Declare array which each thread reduces into\ninteger, dimension(dataSize) :: localReduceBuf\n\n DO repIter = 1, totalReps !loop for totalReps\n\n !initialise all reduce arrays to ens...
22
allocateReduceData:Allocate memory for the main data arrays in the reduction operation.
int allocateReduceData(int bufferSize){\n localReduceBuf = (int *) malloc(bufferSize * sizeof(int));\n globalReduceBuf = (int *) malloc(bufferSize * sizeof(int));\n tempBuf = (int *) malloc((bufferSize * numThreads) * sizeof(int));\n finalReduceBuf = (int *) malloc((bufferSize * numThreads) * sizeof(int));\...
SUBROUTINE allocateData(bufferSize)\ninteger, intent(in) :: bufferSize\n\nallocate(globalReduceBuf(bufferSize))\n!Final reduce is of size dataSize*numThreads\nallocate(finalReduceBuf(bufferSize*numThreads))\n\nEND SUBROUTINE allocateData
23
freeReduceData:Free allocated memory for main data arrays.
int freeReduceData(){\n    free(localReduceBuf);\n    free(globalReduceBuf);\n    free(tempBuf);\n    free(finalReduceBuf);\n    return 0;\n}
SUBROUTINE freeData()\ndeallocate(globalReduceBuf)\ndeallocate(finalReduceBuf)\nEND SUBROUTINE freeData
24
testReduce:Verifies that the reduction benchmarks worked correctly.
int testReduce(int bufferSize, int benchmarkType){ \n int i, testFlag, reduceFlag; \n int correctReduce, lastGlobalID; \n correctReduce = 0; \n testFlag = TRUE; \n lastGlobalID = (numMPIprocs * numThreads); \n\n for (i=0; i<lastGlobalID; i++){ \n correctReduce = correctReduce + i; \n } \n\n ...
SUBROUTINE testReduce(bufferSize,benchmarkType)\ninteger, intent(in) :: bufferSize, benchmarkType\ninteger :: i\ninteger :: correctReduce, lastGlobalID\nlogical :: testFlag, reduceFlag\n\n!Initialise correctReduce to 0..\ncorrectReduce = 0\n!..and testFlag to true\ntestFlag = .true.\n\n!set lastGlobalID\nlastGlobalID =...
25
scatterGather:Driver routine for the scatter and gather benchmarks.
int scatterGather(int benchmarkType){\n int dataSizeIter, bufferSize;\n repsToDo = defaultReps;\n dataSizeIter = minDataSize; /* initialise dataSizeIter */\n \n while (dataSizeIter <= maxDataSize){\n bufferSize = dataSizeIter * numThreads;\n\n if (benchmarkType == SCATTER){\n all...
SUBROUTINE scatterGather(benchmarkType)\ninteger, intent(in) :: benchmarkType\ninteger :: dataSizeIter\ninteger :: bufferSize\n\n!Initialise repsToDo to defaultReps\nrepsToDo = defaultReps\n\n!Start loop over data sizes\ndataSizeIter = minDataSize !initialise dataSizeIter\nDO WHILE (dataSizeIter <= maxDataSize)\n!Calcu...
26
scatterKernel:Implements the scatter benchmark. The root process first scatters its send buffer to the other processes. Each thread under an MPI process then reads its portion of scatterRecvBuf.
int scatterKernel(int totalReps, int dataSize){\n    int repIter, i;\n    int totalSendBufElems, sendCount, recvCount;\n    totalSendBufElems = numMPIprocs * numThreads * dataSize;\n    sendCount = dataSize * numThreads;\n    recvCount = sendCount;\n    for (repIter=0; repIter<totalReps; repIter++){...
SUBROUTINE scatterKernel(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\ninteger :: totalSendBufElems, sendCount, recvCount\n\n!Calculate totalSendBufElems\ntotalSendBufElems = numMPIprocs * numThreads * dataSize\n!Calculate sendCount\nsendCount = dataSize * numThreads\nrecvCoun...
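A minimal C sketch of one scatter repetition as described: the root scatters sendCount ints to each process, then each thread reads its slice of the receive buffer. SCATTERROOT and the buffer names follow nearby rows; finalBuf is assumed to be the per-process result array.

```c
#include <mpi.h>
#include <omp.h>

#define SCATTERROOT 0

extern int *scatterSendBuf, *scatterRecvBuf, *finalBuf;
extern int numThreads;
extern MPI_Comm comm;

void scatterKernelSketch(int totalReps, int dataSize){
    int sendCount = dataSize * numThreads;
    for (int repIter = 0; repIter < totalReps; repIter++){
        /* root distributes sendCount ints to every process */
        MPI_Scatter(scatterSendBuf, sendCount, MPI_INT,
                    scatterRecvBuf, sendCount, MPI_INT,
                    SCATTERROOT, comm);
        /* each thread copies its own portion out of scatterRecvBuf */
        #pragma omp parallel
        {
            int start = omp_get_thread_num() * dataSize;
            for (int i = start; i < start + dataSize; i++){
                finalBuf[i] = scatterRecvBuf[i];
            }
        }
    }
}
```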
27
gatherKernel:Implements the gather benchmark. Each thread writes part of its buffer then all data is gathered to the master process.
int gatherKernel(int totalReps, int dataSize){\n int repIter, i;\n int totalRecvBufElems, sendCount, recvCount;\n int startVal;\n\n totalRecvBufElems = dataSize * numThreads * numMPIprocs;\n sendCount = dataSize * numThreads;\n recvCount = sendCount;\n startVal = (myMPIRank * sendCount) + GATHERSTA...
SUBROUTINE gatherKernel(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\ninteger :: totalRecvBufElems\ninteger :: sendCount, recvCount\ninteger :: startVal\n\n!Calculate totalRecvBufElems\ntotalRecvBufElems = dataSize * numThreads * numMPIprocs\n!Each process calculates its send ...
28
allocateScatterGatherData:Allocate memory for main data arrays
int allocateScatterGatherData(int bufferSize, int benchmarkType){ \n if (benchmarkType == SCATTER){ \n if (myMPIRank == SCATTERROOT){ \n scatterSendBuf = (int *) malloc((bufferSize * numMPIprocs) * sizeof(int)); \n } \n scatterRecvBuf = (int *) malloc(bufferSize * sizeof(int)); \n finalBuf = (int *)ma...
SUBROUTINE allocateData(bufferSize, benchmarkType)\ninteger, intent(in) :: bufferSize, benchmarkType\n\nIF (benchmarkType == SCATTER) THEN !Allocate for scatter\n\n!scatterSendBuf is size (bufferSize * numMPIprocs)\nIF (myMPIRank == SCATTERROOT) THEN\nallocate(scatterSendBuf(bufferSize*numMPIprocs))\nEND IF\nallocate(sc...
29
freeScatterGatherData:Free memory of main data arrays.
int freeScatterGatherData(int benchmarkType){\n    if (benchmarkType == SCATTER){\n        if (myMPIRank == SCATTERROOT){\n            free(scatterSendBuf);\n        }\n        free(scatterRecvBuf);\n        free(finalBuf);\n    } else if (benchmarkType == GATHER){\n        free(gatherSendBuf);...
SUBROUTINE freeData(benchmarkType)\ninteger, intent(in) :: benchmarkType\n\nIF (benchmarkType == SCATTER) THEN\n\nIF (myMPIRank == SCATTERROOT) THEN\ndeallocate(scatterSendBuf)\nEND IF\ndeallocate(scatterRecvBuf)\ndeallocate(finalBuf)\n\nELSE IF (benchmarkType == GATHER) THEN\n\ndeallocate(gatherSendBuf)\nIF (myMPIRank...
30
testScatterGather:Verifies that the scatter and gather benchmarks worked correctly.
int testScatterGather(int sizeofBuffer, int benchmarkType){\n    int i, startVal;\n    int testFlag, reduceFlag;\n    int *testBuf;\n    testFlag = TRUE;\n    testBuf = (int *) malloc (sizeofBuffer * sizeof(int));\n    if (benchmarkType == SCATTER){\n        startVal = (myMPIRank * sizeofBuffer) + ...
SUBROUTINE testScatterGather(sizeofBuffer, benchmarkType)\ninteger, intent(in) :: sizeofBuffer, benchmarkType\ninteger :: i\ninteger :: startVal\nlogical :: testFlag, reduceFlag\n\n!initialise testFlag to true\ntestFlag = .true.\n\n!Allocate space for testBuf\nallocate(testBuf(sizeofBuffer))\n\nIF (benchmarkType == SCA...
31
Main driver for the mixed mode benchmark program:Reads the benchmark input file. Initialises the parallel environment. Calls each benchmark.
int main(int argc, char *argv[]){\n int supportFlag;\n char name[MAXSTRING];\n\n initParallelEnv();\n\n if (myMPIRank == 0){\n if (argc != 2){\n printf(\"ERROR Reading input file from command line.\\n\");\n printf(\"Usage: %s <filename>\", argv[0] );\n finaliseParalle...
PROGRAM mixedModeBenchmark\n use pt_to_pt_pingpong\n use pt_to_pt_pingping\n use pt_to_pt_multiPingPong\n use pt_to_pt_multiPingPing\n use pt_to_pt_haloExchange\n use collective_barrier\n use collective_reduction\n use collective_broadcast\n use collective_scatterGather\n use collective_alltoall\n use parallelEnvironme...
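Both driver listings are truncated early. Below is a hedged C outline of the control flow the description names: initialise the environment, read the input, then dispatch benchmarks until the input is drained. FINISHED and the dispatch switch are assumptions about the real constants.

```c
#include <stdio.h>

#define FINISHED -2   /* assumed sentinel for "no more benchmarks" */

extern int myMPIRank;
int initParallelEnv(void);
int finaliseParallelEnv(void);
int openFile(char *fileName);
int setupBenchmarkList(void);
int readBenchmarkParams(void);
int findBenchmarkNumber(void);

int mainSketch(int argc, char *argv[]){
    initParallelEnv();
    if (myMPIRank == 0){
        if (argc != 2){
            printf("ERROR Reading input file from command line.\n");
            printf("Usage: %s <filename>\n", argv[0]);
            finaliseParallelEnv();
            return 1;
        }
        openFile(argv[1]);
    }
    setupBenchmarkList();
    readBenchmarkParams();

    /* dispatch each benchmark named in the input file */
    for (;;){
        int benchmarkNumber = findBenchmarkNumber();
        if (benchmarkNumber == FINISHED) break;
        /* switch (benchmarkNumber){ ... call the matching driver ... } */
    }

    finaliseParallelEnv();
    return 0;
}
```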
32
printHeader:Prints a header in the output.
int printHeader(){\n char string[MAXSTRING];\n threadSupportToString(benchReport.supportLevel, string);\n printf(\"----------------------------------------------\\n\");\n printf(\"Mixed mode MPI/OpenMP benchmark suite v1.0\\n\");\n printf(\"----------------------------------------------\\n\");\n print...
SUBROUTINE printHeader(numProcs, numThreads, threadSupport)\n integer, intent(in) :: numProcs, numThreads, threadSupport\n character (len = MAXSTRING) :: string\n\n !Convert threadSupport to a string for output\n CALL threadSupportToString(threadSupport, string)\n\n write(*,*) "-------------------------------------------...
33
setBenchName:Sets the benchName, benchNumber and if the benchmark is supported.
int setBenchName(char *name, int number, int support){ \n    strcpy(benchReport.benchName,name); \n    benchReport.benchNumber = number; \n    benchReport.supported = support; \n    printBenchName(); \n    return 0; \n}
SUBROUTINE setBenchName(name,number,support)\n character (len = MAXSTRING), intent (in) :: name\n integer, intent(in) :: number\n logical, intent(in) :: support\n\n benchReport%benchName = name\n benchReport%benchNumber = number\n benchReport%supported = support\n\n CALL printBenchName()\n\n END SUBROUTINE setBenchName...
34
printBenchName:Print header for benchmark - name of benchmark and list of names of each column.
int printBenchName(){\n\tprintf("--------------------------------------------\n");\n\tprintf("# %s\n", benchReport.benchName);\n\tprintf("--------------------------------------------\n");\n\n\tif (benchReport.supported == FALSE){\n\t\tprintf("WARNING: Implementation does not support benchmark.\n");\n\t}\n\n\treturn 0;\...
SUBROUTINE printBenchName()\n write(*,*) "--------------------------------------------"\n write(*,*) "# ", benchReport%benchName\n write(*,*) "--------------------------------------------"\n\n !print warning if benchmark not supported\n IF (benchReport%supported .EQV. .false.) THEN\n write(*,*) "WARNING: Implementation does no...
35
printNodeReport:For the pingpong and pingping benchmarks, prints whether the two MPI processes are on the same node.
int printNodeReport(int sameNode, int rankA, int rankB){\n\tif (sameNode == TRUE){\n\t\tprintf("Intra node benchmark between process %d and process %d\n",rankA,rankB);\n\t}\n\telse if (sameNode == FALSE){\n\t\tprintf("Inter node benchmark between process %d and process %d\n",rankA,rankB);\n\t}\n\n\treturn 0;\n}
SUBROUTINE printNodeReport(sameNode,rankA,rankB)\n integer, intent(in) :: rankA, rankB\n logical, intent(in) :: sameNode\n IF (sameNode .EQV. .true.) THEN\n write(*,*) "Intra node benchmark between process",rankA, "and process", rankB \n ELSE IF (sameNode .EQV. .false.) THEN\n write(*,*) "Inter node benchmark between proce...
36
printBenchHeader:Prints the column headings for the benchmark report.
int printBenchHeader(){\n\tprintf(" Data Size Msg Size (bytes) No. Reps ");\n\tprintf("Time (sec) Time/Rep (s) Test\n");\n\n\tprintf("----------- ------------------ ---------- ");\n\tprintf("------------ -------------- ------\n");\n\n\treturn 0;\n}
SUBROUTINE printBenchHeader()\n\n write(*,fmt="(2x,a9,5x,a16,5x,a8,5x,a10,5x,a12,5x,a4)")&\n "Data Size","Msg Size (bytes)","No. Reps",&\n "Time (sec)","Time/Rep (s)","Test"\n write(*,fmt="(1x,a11,3x,a18,3x,a10,3x,a12,3x,a14,3x,a6)")&\n "-----------","------------------","----------",&\n "------------","--------------","...
37
setTestOutcome:Sets benchReport's testOutcome element. Called in test routine of each benchmark.
int setTestOutcome(int outcome){\n    if (outcome == TRUE){\n        strcpy(benchReport.testOutcome,\"Pass\");\n    }\n    else if (outcome == FALSE){\n        strcpy(benchReport.testOutcome,\"Fail\");\n    }\n    return 0;\n}
SUBROUTINE setTestOutcome(outcome)\n logical, intent(in) :: outcome\n\n benchReport%testOutcome = outcome\n\n END SUBROUTINE setTestOutcome
38
setReportParams:Sets the numReps and benchTime for a given data size.
int setReportParams(int size, int reps, double time){\n benchReport.dataSize = size; \n benchReport.numReps = reps; \n benchReport.benchTime = time; \n benchReport.timePerRep = time / reps; \n \n if (benchReport.benchNumber <= LAST_PT_PT_ID){\n /* dataSize x numThreads x sizeof(int) */\n ...
SUBROUTINE setReportParams(size,reps,time)\n integer, intent(in) :: size, reps\n DOUBLE PRECISION, intent(in) :: time\n\n benchReport%dataSize = size\n benchReport%numReps = reps\n benchReport%benchTime = time\n !Calculate and set time for 1 rep\n benchReport%timePerRep = time/reps\n !Calculate the size of message in b...
39
printMultiProcInfo:This prints the comm world ranks and processor names for each pair of processes in the multi-pingpong or multi-pingping benchmarks.
int printMultiProcInfo(int printNode, int pairWorldRank, char *pairProcName){ \n    if (crossCommRank == printNode){ \n        printf(\"MPI process %d on %s \", myMPIRank,myProcName); \n        printf(\"communicating with MPI process %d on %s\\n\", pairWorldRank,pairProcName); \n    } \n    return 0; \n}
SUBROUTINE printMultiProcInfo(printNode, pairWorldRank, pairProcName)\n integer, intent(in) :: printNode, pairWorldRank\n character (len = MPI_MAX_PROCESSOR_NAME) :: pairProcName\n IF (crossCommRank == printNode) THEN\n print *, "MPI process ", myMPIRank, "on ", trim(myProcName), &\n " communicating with MPI process ",...
40
printReport:Prints out a line of results after each data size iteration.
int printReport(){\n    printf(\"d %d\\t\\t%d\\t\\t %d\\t\\t%lf\\t%lf\\t%s\\n\", \\\n    benchReport.dataSize, benchReport.bytes, benchReport.numReps, \\\n    benchReport.benchTime, benchReport.timePerRep, benchReport.testOutcome);\n    return 0;\n}
SUBROUTINE printReport()\n character (len =4) testString\n IF(benchReport%testOutcome .EQV. .true.) THEN\n testString = "Pass"\n ELSE\n testString = "Fail"\n END IF\n write(*,fmt="('d',i10,5x,i16,5x,i8,5x,f10.6,4x,f14.9,5x,a4)")&\n benchReport%dataSize, benchReport%bytes,&\n benchReport%numReps,benchReport%benchTime,&\...
41
printBalanceError:Prints an error if there isn't the same number of MPI processes in the nodes selected for the multi-pingpong or multi-pingping benchmarks.
int printBalanceError(){ \n    printf(\"\\nERROR: Nodes selected for this benchmark do not\\n\"); \n    printf(\"have same number of MPI processes per node.\\n\"); \n    printf(\"Skipping benchmark...\\n\"); \n    return 0; \n}
SUBROUTINE printBalanceError()\n print *, ""\n print *, "ERROR: Nodes selected for this benchmark do not", &\n "have same number of MPI processes per node.", &\n "Skipping benchmark..."\n print *, ""\n END SUBROUTINE printBalanceError
42
threadSupportToString:Converts the threadSupport integer variable to a string for output.
int threadSupportToString(int threadSupport, char *string){\n    if (threadSupport == MPI_THREAD_SINGLE){\n        strcpy(string,\"MPI_THREAD_SINGLE\");\n    } else if (threadSupport == MPI_THREAD_FUNNELED){\n        strcpy(string,\"MPI_THREAD_FUNNELED\");\n    } else if (threadSupport == MPI_THREAD_SERI...
SUBROUTINE threadSupportToString(threadSupport, string)\n integer, intent(in) :: threadSupport\n character (len = MAXSTRING), intent(out) :: string\n\n IF (threadSupport == MPI_THREAD_SINGLE) THEN\n string = "MPI_THREAD_SINGLE"\n ELSE IF (threadSupport == MPI_THREAD_FUNNELED) THEN\n string = "MPI_THREAD_FUNNELED"\n ELS...
43
initParallelEnv:Initialises the MPI and OpenMP environments. Finds the total number of MPI processes and OpenMP threads. Also finds the ID of each MPI process and OpenMP thread.
int initParallelEnv() { \n MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &threadSupport); \n comm = MPI_COMM_WORLD; \n MPI_Comm_size(comm, &numMPIprocs); \n MPI_Comm_rank(comm, &myMPIRank); \n sizeInteger = sizeof(int); \n MPI_Get_processor_name(myProcName, &procNameLen); \n\n // * across node b...
SUBROUTINE initParallelEnv()\n\n !setup MPI programming environment \n CALL MPI_Init_thread(MPI_THREAD_MULTIPLE,threadSupport,ierr)\n\n comm = MPI_COMM_WORLD\n CALL MPI_Comm_size(comm, numMPIprocs, ierr)\n CALL MPI_Comm_rank(comm, myMPIRank, ierr)\n\n !Find the number of bytes for an int (numMPIprocs)\n CALL MPI_Type_s...
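A minimal C sketch of the setup sequence described, mirroring the visible calls in both columns; the OpenMP team-size query at the end is an assumption about how numThreads is found.

```c
#include <mpi.h>
#include <omp.h>

extern MPI_Comm comm;
extern int numMPIprocs, myMPIRank, numThreads, threadSupport;
extern char myProcName[MPI_MAX_PROCESSOR_NAME];
extern int procNameLen;

void initParallelEnvSketch(void){
    /* request full thread support and record what was provided */
    MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &threadSupport);
    comm = MPI_COMM_WORLD;
    MPI_Comm_size(comm, &numMPIprocs);
    MPI_Comm_rank(comm, &myMPIRank);
    MPI_Get_processor_name(myProcName, &procNameLen);

    /* OpenMP side: one representative thread records the team size */
    #pragma omp parallel
    {
        #pragma omp master
        numThreads = omp_get_num_threads();
    }
}
```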
44
finaliseParallelEnv:Closes the MPI programming environment.
int finaliseParallelEnv(){ \n    MPI_Finalize(); \n    free(globalIDarray); \n    return 0; \n}
SUBROUTINE finaliseParallelEnv()\n\n !finalise the MPI programming environment\n CALL MPI_Finalize(ierr)\n !free the space created for globalIDarray...\n deallocate(globalIDarray)\n \n END SUBROUTINE finaliseParallelEnv
45
findRank:Finds the MPI ranks which will take part in the pingping or pingpong benchmarks based on the numbers read from the input file.
int findRank(int rankIn){\n int CalcRank;\n\n if (rankIn < 0){\n CalcRank = numMPIprocs + rankIn;\n } else{\n CalcRank = rankIn;\n }\n\n if (CalcRank > (numMPIprocs-1)){\n printf(\"Warning: Rank input greater than total process count.\\n\");\n printf(\"Using Rank = %d\\n\", nu...
FUNCTION findRank(rankIn)\n integer, intent(in) :: rankIn\n integer :: findRank\n\n !Figure out actual MPI rank\n IF (rankIn < 0) THEN\n findRank = numMPIprocs + rankIn\n ELSE\n findRank = rankIn\n END IF\n\n !Check if findRank is too big or still -ve\n IF (findRank > (numMPIprocs-1)) THEN\n !write(*,*) "Warning: Rank in...
46
findNeighbourRanks:This creates a cartesian topology and finds the left and right neighbours of each process.
int findNeighbours(){\n\tint dims[1];\n\tint periods[1];\n\tint reorder;\n\n\tdims[0] = 0;\n\tMPI_Dims_create(numMPIprocs, 1, dims);\n\n\tperiods[0] = TRUE;\n\treorder = FALSE;\n\n\tMPI_Cart_create(comm, 1, dims, periods, reorder, &commCart);\n\n\tMPI_Cart_shift(commCart, 0, 1, &leftNeighbour, &rightNeighbour);\n\n\tre...
SUBROUTINE findNeighbourRanks()\n integer :: dims(1) !dims array for MPI_Dims_Create\n logical, parameter :: PERIODS(1) = (/.true./), REORDER = .false.\n\n !find a good process distribution\n dims = 0 !zero so that dims_create tries to rearrange\n CALL MPI_Dims_Create(numMPIProcs,1,dims,ierr)\n\n !Create the cartesian ...
47
benchmarkSupport:This function compares the level of thread support needed by a particular benchmark with the level provided by the implementation.
int benchmarkSupport(int required){\n    int benchSupport;\n    if (required <= threadSupport){\n        benchSupport = TRUE;\n    } else {\n        benchSupport = FALSE;\n    }\n    return benchSupport;\n}
FUNCTION benchmarkSupport(required)\n integer, intent(in) :: required\n logical :: benchmarkSupport\n\n IF (required <= threadSupport) THEN\n benchmarkSupport = .true.\n ELSE\n benchmarkSupport = .false.\n END IF\n\n END FUNCTION benchmarkSupport
48
compareProcNames:Compares the names of two processes to check whether they are on the same node.
int compareProcNames(int rankA, int rankB){\n    int sameNode;\n    char recvProcName[MPI_MAX_PROCESSOR_NAME];\n    if (myMPIRank == rankB){\n        MPI_Send(myProcName, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, rankA, TAG, comm);\n    }\n    else if (myMPIRank == rankA){\n        MPI_Recv(recvProcName, MPI_MAX_PROC...
FUNCTION compareProcNames(rankA, rankB)\n integer, intent(in) :: rankA, rankB\n logical :: compareProcNames\n character (len = MPI_MAX_PROCESSOR_NAME) :: recvProcName\n\n !Rank B sends procName to Rank A\n IF (myMPIRank == rankB) THEN\n CALL MPI_Send(myProcName, MPI_MAX_PROCESSOR_NAME, &\n MPI_CHARACTER, rankA, tag, co...
49
setupCommunicators:This creates two new communicators. The first gives a local communicator for processes on the same node. The second uses the local rank to give a communicator across node boundaries. e.g. for 16 nodes each with 2 processors, this routine will give 16 local communicators of size 2 and 2 communicators ...
int setupCommunicators(){\n int procHash;\n\n procHash = procNameToHash();\n\n /* local communicator. */\n MPI_Comm_split(comm, procHash, 0, &localComm);\n MPI_Comm_rank(localComm, &localCommRank);\n MPI_Comm_size(localComm, &localCommSize);\n\n MPI_Comm_split(comm, localCommRank, 0, &crossComm);\n...
SUBROUTINE setupCommunicators()\n integer :: procHash\n \n !Get hash based on processor name\n procHash = procNameToHash()\n \n !Comm_split using procHash as colour to get \n !local cmmunicator.\n CALL MPI_Comm_split(comm, procHash, 0, localComm, ierr)\n \n !Find ranks of processes in localComm\n CALL MPI_Comm_rank(loc...
50
procNameToHash:Creates an integer hash for each process. Each process on the same node will have the same hash value.
int procNameToHash(){\n    int procHash,i;\n    procHash = 0;\n    for (i=0; i<procNameLen; i++){\n        procHash = (7 * procHash) + (int)(myProcName[i]);\n    }\n    return procHash;\n}
FUNCTION procNameToHash()\n integer :: procNameToHash\n integer :: i\n\n !Initialise hash to 0\n procNameToHash = 0\n\n DO i = 1, procNameLen\n \n procNameToHash = 7 * procNameToHash + &\n ICHAR(myProcName(i:i))\n END DO\n \n END FUNCTION procNameToHash
51
exchangeWorldRanks:Finds the MPI_COMM_WORLD ranks of the processes participating in the multi-pingpong and multi-pingping benchmarks.
int exchangeWorldRanks(int nodeA, int nodeB, int *otherWorldRank) {\n    int destRank;\n    if (crossCommRank == nodeA) {\n        destRank = nodeB;\n    } else if (crossCommRank == nodeB) {\n        destRank = nodeA;\n    }\n    if (crossCommRank == nodeA || crossCommRank == nodeB) {\n        M...
SUBROUTINE exchangeWorldRanks(nodeA, nodeB, otherWorldRank)\ninteger, intent(in) :: nodeA, nodeB\ninteger, intent(out) :: otherWorldRank\ninteger :: destRank\n\nIF (crossCommRank == nodeA) THEN\ndestRank = nodeB\nELSE IF (crossCommRank == nodeB) THEN\ndestRank = nodeA\nEND IF\n\nIF (crossCommRank == nodeA .or. crossCom...
52
sendProcName:Sends the processor name from processes in destNode of crossComm to srcNode.
int sendProcName(int destNode, int srcNode, char *destProcName){ \n if (crossCommRank == srcNode){ \n MPI_Send(myProcName, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, destNode, TAG, crossComm); \n } \n else if (crossCommRank == destNode){ \n MPI_Recv(destProcName, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, srcNode,...
SUBROUTINE sendProcName(destNode, srcNode, destProcName)\ninteger, intent(in) :: srcNode, destNode\ncharacter (len = MPI_MAX_PROCESSOR_NAME), intent(out) :: destProcName\n\nIF (crossCommRank == srcNode) THEN\nCALL MPI_Send(myProcName, MPI_MAX_PROCESSOR_NAME, &\nMPI_CHARACTER, destNode, tag, crossComm, ierr)\nELSE IF (c...
53
checkCrossCommBalance:Checks if there's a balance in the number of processes in crossComm nodes.
int crossCommBalance(int nodeA, int nodeB){\n int localCommSize, otherLocalCommSize;\n int crossCommBalance;\n\n MPI_Comm_size(localComm, &localCommSize);\n\n if ((crossCommRank == nodeB) && (localCommRank == 0)){\n MPI_Send(&localCommSize, 1, MPI_INT, nodeA, TAG, crossComm);\n }\n else if ((cr...
FUNCTION crossCommBalance(nodeA, nodeB)\ninteger, intent(in) :: nodeA, nodeB\ninteger :: localCommSize, otherLocalCommSize\nlogical :: crossCommBalance\n\nCALL MPI_Comm_size(localComm, localCommSize, ierr)\n\nIF (crossCommRank == nodeB .and. localCommRank == 0) THEN\nCALL MPI_Send(localCommSize, 1, MPI_INTEGER, nodeA, ...
54
haloExchange:Driver subroutine for the haloExchange benchmark.
int haloExchange(int benchmarkType){ \n int dataSizeIter; \n findNeighbours(); \n repsToDo = defaultReps; \n dataSizeIter = minDataSize; /* Initialise dataSizeIter */ \n \n while (dataSizeIter <= maxDataSize){ \n sizeofBuffer = dataSizeIter * numThreads; \n allocateHaloexchangeData(sizeo...
SUBROUTINE haloExchange(benchmarkType)\ninteger, intent(in) :: benchmarkType\ninteger :: dataSizeIter\n\nCALL findNeighbourRanks()\n\nrepsToDo = defaultReps\n\ndataSizeIter = minDataSize\nDO WHILE (dataSizeIter <= maxDataSize)\nsizeofBuffer = dataSizeIter * numThreads\n\nCALL allocateData(sizeofBuffer)\n\nIF (benchmark...
55
masteronlyHaloexchange:Each process exchanges a message with its left and right neighbour. Communication takes place outside of the parallel region.
int masteronlyHaloexchange(int totalReps, int dataSize){\n int repIter, i;\n \n for (repIter=0; repIter<totalReps; repIter++){\n \n /* Fill leftSendBuf and rightSendBuf using a parallel for directive. */\n #pragma omp parallel for default(none) \\\n private(i) \\\n shared(lef...
SUBROUTINE masteronlyHaloexchange(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\nDO repIter = 1, totalReps\n\n!Each thread writes its globalID to rightSendBuf\n!and leftSendBuf with a parallel do directive\n!$OMP PARALLEL DO DEFAULT(NONE), &\n!$OMP PRIVATE(i), &\n!$OMP SHARED(l...
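Both halo-exchange kernels are truncated before the message passing. A hedged C sketch of the halo-swap step that the masteronly variant performs outside the parallel region, using the non-blocking send / blocking receive pattern named in the pingping rows; TAG and the extern globals are assumptions.

```c
#include <mpi.h>

#define TAG 0

extern int *leftSendBuf, *rightSendBuf, *leftRecvBuf, *rightRecvBuf;
extern int leftNeighbour, rightNeighbour, sizeofBuffer;
extern MPI_Comm commCart;

void haloExchangeStepSketch(void){
    MPI_Request requests[2];

    /* post non-blocking sends to both neighbours */
    MPI_Isend(leftSendBuf, sizeofBuffer, MPI_INT, leftNeighbour,
              TAG, commCart, &requests[0]);
    MPI_Isend(rightSendBuf, sizeofBuffer, MPI_INT, rightNeighbour,
              TAG, commCart, &requests[1]);

    /* blocking receives from both neighbours */
    MPI_Recv(leftRecvBuf, sizeofBuffer, MPI_INT, leftNeighbour,
             TAG, commCart, MPI_STATUS_IGNORE);
    MPI_Recv(rightRecvBuf, sizeofBuffer, MPI_INT, rightNeighbour,
             TAG, commCart, MPI_STATUS_IGNORE);

    /* complete the outstanding sends */
    MPI_Waitall(2, requests, MPI_STATUSES_IGNORE);
}
```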
56
funnelledHaloexchange:Each process exchanges a message with its left and right neighbour. Communication is performed by one thread inside the parallel region.
int funnelledHaloexchange(int totalReps, int dataSize){\n int repIter, i;\n\n #pragma omp parallel default(none) \\\n private(i,repIter) \\\n shared(dataSize,sizeofBuffer,leftSendBuf,rightSendBuf) \\\n shared(rightRecvBuf,leftRecvBuf,finalLeftBuf,finalRightBuf) \\\n shared(globalIDarray,commCart,total...
SUBROUTINE funnelledHaloexchange(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\n\n!Open the parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter), &\n!$OMP SHARED(dataSize,sizeofBuffer,leftSendBuf,rightSendBuf), &\n!$OMP SHARED(rightRecvBuf,leftRecvBuf,fina...
57
multipleHaloexchange:Each process exchanges a message with its left and right neighbour. All threads take part in the inter-process communication.
int multipleHaloexchange(int totalReps, int dataSize){ \n int repIter, i; \n int lBound; \n \n #pragma omp parallel default(none) \\ \n private(i,requestArray,statusArray,lBound,repIter) \\ \n shared(dataSize,sizeofBuffer,leftSendBuf,rightSendBuf) \\ \n shared(rightRecvBuf,leftRecvBuf,finalLeftBuf,...
SUBROUTINE multipleHaloexchange(totalReps, dataSize)\ninteger, intent(in) :: totalReps, dataSize\ninteger :: repIter, i\ninteger :: lBound, uBound\n\n !Open the parallel region\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,requestArray,statusArray,status,ierr), &\n!$OMP PRIVATE(lBound,uBound,repIter),&\n!$OMP SHARE...
58
allocateHaloexchangeData:Allocate memory for the main data arrays in the haloexchange.
int allocateHaloexchangeData(int sizeofBuffer){\n leftSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n leftRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n rightSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n rightRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n finalLeftBuf = (i...
SUBROUTINE allocateData(bufferSize)\ninteger, intent(in) :: bufferSize\n\nallocate(leftSendBuf(bufferSize), leftRecvBuf(bufferSize))\nallocate(rightSendBuf(bufferSize), rightRecvBuf(bufferSize))\nallocate(finalLeftBuf(bufferSize), finalRightBuf(bufferSize))\n\nEND SUBROUTINE allocateData
59
freeHaloexchangeData:Deallocates the storage space for the main data arrays.
int freeHaloexchangeData(){ \n    free(leftSendBuf); \n    free(leftRecvBuf); \n    free(rightSendBuf); \n    free(rightRecvBuf); \n    free(finalLeftBuf); \n    free(finalRightBuf); \n    return 0; \n}
SUBROUTINE freeData()\ndeallocate(leftSendBuf, leftRecvBuf)\ndeallocate(rightSendBuf, rightRecvBuf)\ndeallocate(finalLeftBuf, finalRightBuf)\n\nEND SUBROUTINE freeData
60
testHaloexchange:Verifies that the halo exchange benchmark worked correctly.
int testHaloexchange(int sizeofBuffer, int dataSize){\n int i;\n int testFlag, reduceFlag;\n int *testLeftBuf, *testRightBuf;\n\n testFlag = TRUE;\n\n testLeftBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n testRightBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\n #pragma omp parallel for de...
SUBROUTINE testHaloexchange(sizeofBuffer, dataSize)\n integer, intent(in) :: sizeofBuffer, dataSize\n integer :: i\n logical :: testFlag, reduceFlag\n\n !set testFlag to true\n testFlag = .true.\n\n !allocate space for testLeftBuf and testRightBuf\n allocate(testLeftBuf(sizeofBuffer),testRightBuf(sizeofBuffer))\n\n !Co...
61
multiPingPing:Driver subroutine for the multi-pingping benchmark.
int multiPingping(int benchmarkType){\n    int dataSizeIter;\n    char otherProcName[MPI_MAX_PROCESSOR_NAME];\n    int balance;\n    pingNodeA = 0;\n    pingNodeB = 1;\n    balance = crossCommBalance(pingNodeA, pingNodeB);\n\n    if (balance == FALSE){\n        if (myMPIRank == 0){\n            ...
SUBROUTINE multiPingPing(benchmarkType)\n integer, intent(in) :: benchmarkType\n integer :: dataSizeIter\n character (len = MPI_MAX_PROCESSOR_NAME) :: otherProcName\n logical :: balance\n\n pingNodeA = 0\n pingNodeB = 1\n\n !Check if there's a balance in num of MPI processes \n !in pingNodeA and pingNodeB.\n balance = ...
62
masteronlyMultiPingping:All Processes with rank of pingNodeA or pingNodeB in crossComm send a message to each other. MPI communication takes place outside of the parallel region.
int masteronlyMultiPingping(int totalReps, int dataSize){\n int repIter, i;\n int destRank;\n\n /* set destRank to ID of other process */\n if (crossCommRank == pingNodeA){\n destRank = pingNodeB;\n }\n else if (crossCommRank == pingNodeB){\n destRank = pingNodeA;\n }\n\n /* loop totalRep times */\n for ...
SUBROUTINE masteronlyMultiPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n\n !set destRank to ID of other process\n IF (crossCommRank == pingNodeA) THEN\n destRank = pingNodeB\n ELSE IF (crossCommRank == pingNodeB) THEN\n destRank = pingNodeA\n E...
63
funnelledMultiPingping:All processes with rank of pingNodeA or pingNodeB in crossComm send a message to each other. Inter-process communication takes place inside the OpenMP parallel region by the master thread.
int funnelledMultiPingping(int totalReps, int dataSize){\n int repIter, i;\n int destRank;\n\n if (crossCommRank == pingNodeA){\n destRank = pingNodeB;\n } \n else if (crossCommRank == pingNodeB){\n destRank = pingNodeA;\n }\n\n #pragma omp parallel default(none) \\\n ...
SUBROUTINE funnelledMultiPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n\n !set destRank to ID of other process\n IF (crossCommRank == pingNodeA) THEN\n destRank = pingNodeB\n ELSE IF (crossCommRank == pingNodeB) THEN\n destRank = pingNodeA\n EN...
64
multipleMultiPingping:All processes with crossCommRank of pingNodeA and pingNodeB in crossComm send a message to each other. Multiple threads take part in the communication.
int multipleMultiPingping(int totalReps, int dataSize){ \n int repIter, i; \n int destRank; \n int lBound; \n if (crossCommRank == pingNodeA){ \n destRank = pingNodeB; \n } else if (crossCommRank == pingNodeB){ \n destRank = pingNodeA; \n } \n #pragma omp parallel default(none) ...
SUBROUTINE multipleMultiPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n integer :: lBound, uBound\n\n !set destRank to be ID of other process\n IF (crossCommRank == pingNodeA) THEN\n destRank = pingNodeB\n ELSE IF (crossCommRank == pingNodeB) TH...
65
allocateMultiPingpingData:Allocates space for the main data arrays. Size of each array is specified by subroutine argument.
int allocateMultiPingpingData(int sizeofBuffer){ \n if (crossCommRank == pingNodeA || crossCommRank == pingNodeB){ \n pingSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n pingRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n finalRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); ...
SUBROUTINE allocateData(sizeofBuffer)\n integer, intent(in) :: sizeofBuffer\n\n IF (crossCommRank == pingNodeA .or. &\n crossCommRank == pingNodeB) THEN\n\n allocate(pingSendBuf(sizeofBuffer))\n allocate(pingRecvBuf(sizeofBuffer))\n allocate(finalRecvBuf(sizeofBuffer))\n\n END IF\n\n END SUBROUTINE allocateData
66
freeMultiPingpingData:Free allocated memory for main data arrays.
int freeMultiPingpingData() {\n    if (crossCommRank == pingNodeA || crossCommRank == pingNodeB) {\n        free(pingSendBuf);\n        free(pingRecvBuf);\n        free(finalRecvBuf);\n    }\n    return 0;\n}
SUBROUTINE freeData()\n \n IF (crossCommRank == pingNodeA .or. &\n crossCommRank == pingNodeB) THEN\n \n deallocate(pingSendBuf, pingRecvBuf)\n deallocate(finalRecvBuf)\n\n END IF\n \n END SUBROUTINE freeData
67
testMultiPingping:Verifies that the multi-pingping benchmark worked correctly.
int testMultiPingping(int sizeofBuffer, int dataSize){\n int i;\n int testFlag, localTestFlag;\n localTestFlag = TRUE;\n\n if (crossCommRank == pingNodeA || crossCommRank == pingNodeB) {\n testBuf = (int *)malloc(sizeof(int) * sizeofBuffer);\n\n #pragma omp parallel for default(none) \\...
SUBROUTINE testMultiPingping(sizeofBuffer, dataSize)\n integer, intent(in) :: sizeofBuffer, dataSize\n integer :: i\n logical :: testFlag, localTestFlag\n\n !set localTestFlag to true\n localTestFlag = .true.\n\n !Testing done for processes on pingNodeA & pingNodeB\n IF (crossCommRank == pingNodeA .or. &\n crossCommRan...
68
multiPingPong:Driver subroutine for the multi-pingpong benchmark.
int multiPingPong(int benchmarkType){\n int dataSizeIter;\n int pongWorldRank;\n char pongProcName[MPI_MAX_PROCESSOR_NAME];\n int balance;\n pingNode = 0;\n pongNode = 1;\n\n balance = crossCommBalance(pingNode, pongNode);\n if (balance == FALSE){\n if (myMPIRank == 0){\n print...
SUBROUTINE multiPingPong(benchmarkType)\n integer, intent(in) :: benchmarkType\n integer :: dataSizeIter\n integer :: pongWorldRank\n character (len = MPI_MAX_PROCESSOR_NAME) :: pongProcName\n logical :: balance\n\n pingNode = 0\n pongNode = 1\n\n !Check if there's a balance in num of MPI processes on\n !pingNode and p...
69
masteronlyMultiPingpong:All MPI processes with crossCommRank = pingNode send a single fixed-length message to the neighbouring process with crossCommRank = pongNode. The neighbouring processes then send the message back to the first process.
int masteronlyMultiPingpong(int totalReps, int dataSize){\n int repIter, i;\n for (repIter = 1; repIter <= totalReps; repIter++){\n if (crossCommRank == pingNode){\n #pragma omp parallel for default(none) \\\n private(i) \\\n shared(pingSendBuf,dataSize,sizeofBuffer,globalI...
SUBROUTINE masteronlyMultiPingpong(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n\n DO repIter = 1, totalReps !loop totalRep times\n\n !All threads under each MPI process with \n !crossCommRank = pingNode write to pingSendBuf array\n !using a PARALLEL DO directive.\n IF (cro...
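A minimal C sketch of one echo step of this benchmark in crossComm: the ping side sends and waits for the reply, the pong side receives and echoes. The buffer names and globals follow the allocate/free rows below; TAG is an assumption.

```c
#include <mpi.h>

#define TAG 0

extern int *pingSendBuf, *pingRecvBuf, *pongSendBuf, *pongRecvBuf;
extern int pingNode, pongNode, crossCommRank, sizeofBuffer;
extern MPI_Comm crossComm;

void multiPingpongStepSketch(void){
    MPI_Status status;
    if (crossCommRank == pingNode){
        /* send the message and wait for the echo */
        MPI_Send(pingSendBuf, sizeofBuffer, MPI_INT, pongNode, TAG, crossComm);
        MPI_Recv(pongRecvBuf, sizeofBuffer, MPI_INT, pongNode, TAG,
                 crossComm, &status);
    }
    else if (crossCommRank == pongNode){
        /* receive the message, then send the reply back */
        MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, pingNode, TAG,
                 crossComm, &status);
        MPI_Send(pongSendBuf, sizeofBuffer, MPI_INT, pingNode, TAG, crossComm);
    }
}
```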
70
funnelledMultiPingpong:All MPI processes with crossCommRank = pingNode send a single fixed-length message to the neighbouring process with crossCommRank = pongNode. The neighbouring processes then send the message back to the first process. All communication takes place within the OpenMP parallel region for this benchmark.
int funnelledMultiPingpong(int totalReps, int dataSize){\n int repIter, i;\n #pragma omp parallel default(none) \\\n private(i,repIter) \\\n shared(pingNode,pongNode,pingSendBuf,pingRecvBuf) \\\n shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \\\n shared(dataSize,globalIDarr...
SUBROUTINE funnelledMultiPingpong(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n\n !Open parallel region for threads\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter), &\n!$OMP SHARED(pingNode,pongNode,pingSendBuf,pingRecvBuf),&\n!$OMP SHARED(pongSendBuf,pongRecvBuf...
71
multipleMultiPingpong:Multiple threads take part in the communication and computation. Each thread of all MPI processes in crossComm = pingNode sends a portion of the message to the neighbouring process in crossComm = pongNode. Each thread of the neighbouring processes then sends the message back to the first process....
int multipleMultiPingpong(int totalReps, int dataSize){ \n int repIter, i; \n int lBound; \n #pragma omp parallel default(none) \\\n private(i,repIter,status,lBound) \\\n shared(pingNode,pongNode,pingSendBuf,pingRecvBuf) \\\n shared(pongSendBuf,pongRecvBuf,finalRecvBuf,sizeofBuffer) \\\n shared(dat...
SUBROUTINE multipleMultiPingpong(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: lBound, uBound\n\n !Open parallel region for threads\n!$OMP PARALLEL DEFAULT(NONE), &\n!$OMP PRIVATE(i,repIter,ierr,status,lBound,uBound), &\n!$OMP SHARED(pingNode,pongNode,pingSendBuf...
72
allocateMultiPingpongData:Allocates space for the main data arrays. Size of each array is specified by subroutine argument.
int allocateMultiPingpongData(int sizeofBuffer){ \n\n if (crossCommRank == pingNode){ \n pingSendBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n pongRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n finalRecvBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n } \n else if (crossC...
SUBROUTINE allocateData(sizeofBuffer)\n integer, intent(in) :: sizeofBuffer\n \n IF (crossCommRank == pingNode) THEN\n !allocate space for arrays that MPI processes\n !with crossCommRank = pingNode will use\n allocate(pingSendBuf(sizeofBuffer))\n allocate(pongRecvBuf(sizeofBuffer))\n allocate(finalRecvBuf(sizeofBuffer)...
73
freeMultiPingpongData:Deallocates the storage space for the main data arrays.
int freeMultiPingpongData(){ \n    if (crossCommRank == pingNode){ \n        free(pingSendBuf); \n        free(pongRecvBuf); \n        free(finalRecvBuf); \n    } \n    else if (crossCommRank == pongNode){ \n        free(pingRecvBuf); \n        free(pongSendBuf); \n    } \n    return 0; \n}
SUBROUTINE freeData()\n \n IF (crossCommRank == pingNode) THEN\n deallocate(pingSendBuf)\n deallocate(pongRecvBuf)\n deallocate(finalRecvBuf)\n ELSE IF (crossCommRank == pongNode) THEN\n deallocate(pingRecvBuf)\n deallocate(pongSendBuf)\n END IF\n\n END SUBROUTINE freeData
74
testMultiPingpong:Verifies that the multi-pingpong benchmark worked correctly.
int testMultiPingpong(int sizeofBuffer, int dataSize){ \n int i; \n int testFlag, localTestFlag; \n localTestFlag = TRUE; \n \n if (crossCommRank == pingNode){ \n testBuf = (int *)malloc(sizeof(int) * sizeofBuffer); \n \n #pragma omp parallel for default(none) \\ \n private(i)...
SUBROUTINE testMultiPingpong(sizeofBuffer, dataSize)\n integer, intent(in) :: sizeofBuffer, dataSize\n integer :: i\n logical :: testFlag, localTestFlag\n\n !Initialise localtestFlag to true\n localTestFlag = .true.\n\n !All processes with crossCommRank = pingNode check\n !if multi-pingpong worked ok.\n IF (crossCommRa...
75
pingPing:Driver subroutine for the pingping benchmark.
int pingPing(int benchmarkType){\n int dataSizeIter;\n int sameNode;\n pingRankA = PPRanks[0];\n pingRankB = PPRanks[1];\n\n sameNode = compareProcNames(pingRankA, pingRankB);\n\n if (myMPIRank == 0){\n printNodeReport(sameNode,pingRankA,pingRankB);\n printBenchHeader();\n }\n\n re...
SUBROUTINE pingPing(benchmarkType)\n integer, intent(in) :: benchmarkType\n integer :: dataSizeIter\n logical :: sameNode\n\n pingRankA = PPRanks(1)\n pingRankB = PPRanks(2)\n\n !Check if pingRankA and pingRankB are on the same node\n sameNode = compareProcNames(pingRankA, pingRankB)\n \n IF (myMPIRank == 0) THEN\n !pr...
76
masteronlyPingping:Two processes send a message to each other using the MPI_Isend, MPI_Recv and MPI_Wait routines. Inter-process communication takes place outside of the parallel region.
int masteronlyPingping(int totalReps, int dataSize){\n    int repIter, i;\n    int destRank;\n    if (myMPIRank == pingRankA){\n        destRank = pingRankB;\n    } else if (myMPIRank == pingRankB){\n        destRank = pingRankA;\n    }\n    for (repIter = 0; repIter < totalReps; repIter++){\n        if (myMPIRank == pingRan...
SUBROUTINE masteronlyPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n\n !set destRank to ID of other process\n IF (myMPIRank == pingRankA) THEN\n destRank = pingRankB\n ELSE IF (myMPIRank == pingRankB) THEN\n destRank = pingRankA\n END IF\n\n DO ...
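The pingping descriptions above and below name the MPI_Isend / MPI_Recv / MPI_Wait triple explicitly; here is a minimal C sketch of one repetition of that call pattern, with destRank being the other participating rank and the globals assumed from nearby rows.

```c
#include <mpi.h>

#define TAG 0

extern int *pingSendBuf, *pingRecvBuf;
extern int sizeofBuffer;
extern MPI_Comm comm;

void pingpingStepSketch(int destRank){
    MPI_Request request;
    MPI_Status status;

    /* post the send without blocking... */
    MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, TAG,
              comm, &request);
    /* ...receive the other process's message... */
    MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, TAG,
             comm, &status);
    /* ...then complete the outstanding send */
    MPI_Wait(&request, &status);
}
```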
77
funnelledPingPing:Two processes send a message to each other using the MPI_Isend, MPI_Recv and MPI_Wait routines. Inter-process communication takes place inside the OpenMP parallel region.
int funnelledPingping(int totalReps, int dataSize){\n\tint repIter, i;\n\tint destRank;\n\n /* set destRank to ID of other process */\n if (myMPIRank == pingRankA){\n \tdestRank = pingRankB;\n }\n else if (myMPIRank == pingRankB){\n \tdestRank = pingRankA;\n }\n\n\t/* Open the parallel region */\n#...
SUBROUTINE funnelledPingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n\n !set destRank to ID of other process\n IF (myMPIRank == pingRankA) THEN\n destRank = pingRankB\n ELSE IF (myMPIRank == pingRankB) THEN\n destRank = pingRankA\n END IF\n\n !Ope...
78
multiplePingping:With this algorithm multiple threads take part in the communication and computation. Each thread sends its portion of the pingSendBuf to the other process using the MPI_Isend/MPI_Recv/MPI_Wait routines.
int multiplePingping(int totalReps, int dataSize){\n\tint repIter, i;\n\tint destRank;\n\tint lBound;\n\n /* set destRank to ID of other process */\n if (myMPIRank == pingRankA){\n \tdestRank = pingRankB;\n }\n else if (myMPIRank == pingRankB){\n \tdestRank = pingRankA;\n }\n\n /* Open parallel ...
SUBROUTINE multiplePingping(totalReps, dataSize)\n integer, intent(in) :: totalReps, dataSize\n integer :: repIter, i\n integer :: destRank\n integer :: lBound, uBound\n\n !set destRank to be ID of other process\n IF (myMPIRank == pingRankA) THEN\n destRank = pingRankB\n ELSE IF (myMPIRank == pingRankB) THEN\n destRank...
79
allocatePingpingData:Allocates space for the main data arrays. Size of each array is specified by subroutine argument.
int allocatePingpingData(int sizeofBuffer){\n\n\tpingSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\tpingRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\tfinalRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));\n\n\treturn 0;\n}
SUBROUTINE allocateData(bufferSize)\n integer, intent(in) :: bufferSize\n\n allocate(pingSendBuf(bufferSize), pingRecvBuf(bufferSize))\n allocate(finalRecvBuf(bufferSize))\n\n END SUBROUTINE allocateData
80
freePingpingData:Deallocates the storage space for the main data arrays.
int freePingpingData(){\n\n\tfree(pingSendBuf);\n\tfree(pingRecvBuf);\n\tfree(finalRecvBuf);\n\n\treturn 0;\n}
SUBROUTINE freeData()\n\n deallocate(pingSendBuf, pingRecvBuf)\n deallocate(finalRecvBuf)\n\n END SUBROUTINE freeData
81
testPingping:Verifies that the PingPing benchmark worked correctly.
int testPingping(int sizeofBuffer,int dataSize){\n\tint otherPingRank, i, testFlag, reduceFlag;\n\tint *testBuf;\n\t/* initialise testFlag to true (test passed) */\n\ttestFlag = TRUE;\n\t/* Testing only needs to be done by pingRankA & pingRankB */\n\tif (myMPIRank == pingRankA || myMPIRank == pingRankB){\n\t\t/* alloca...
SUBROUTINE testPingPing(sizeofBuffer, dataSize)\n integer, intent(in) :: sizeofBuffer, dataSize\n integer :: otherPingRank, i\n logical :: testFlag, reduceFlag\n \n !set testFlag to true\n testFlag = .true.\n \n !Testing only needs to be done by pingRankA & pingRankB\n IF (myMPIRank == pingRankA .or. myMPIRank == pingR...
82
pingPong:Driver subroutine for the pingpong benchmark.
int pingPong(int benchmarkType){\n\tint dataSizeIter;\n\tint sameNode;\n\tpingRank = PPRanks[0];\n\tpongRank = PPRanks[1];\n\t/* Check if pingRank and pongRank are on the same node */\n\tsameNode = compareProcNames(pingRank,pongRank);\n\t/* Master process then does some reporting */\n\tif (myMPIRank == 0){\n\t\t/* prin...
SUBROUTINE pingPong(benchmarkType)\n integer, intent(in) :: benchmarkType\n integer :: dataSizeIter\n logical :: sameNode\n\n pingRank = PPRanks(1) \n pongRank = PPRanks(2)\n \n !Check if pingRank and pongRank are on the same node\n sameNode = compareProcNames(pingRank,pongRank)\n \n !Master process then does reporting...