OpenMPI — Collective communication

MPI_Scatter() example

This program generates an array of integers on the root process only, then scatters equal-sized chunks of the array to all processes (the root included). Each process computes the average of the elements it received into recvbuf and prints it. Since each process receives a different slice of the array, each prints a different average.
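
For reference, MPI_Scatter's prototype per the MPI standard (the const qualifier on sendbuf was added in MPI-3) is:

int MPI_Scatter(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                void *recvbuf, int recvcount, MPI_Datatype recvtype,
                int root, MPI_Comm comm);

Note that sendcount is the number of elements sent to each process, not the total; here both counts are INTS_PER_PROC, and sendbuf is significant only at the root.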

Although the order in which the processes' output lines appear is nondeterministic, each process prints only one short line, so repeated runs rarely show much visible variability. A sample run appears after the listing.

/*
* scatter from root, multiple recipients.
* 2019-11-30
*/
#include <stdio.h>
#include <stdlib.h> // malloc(), free()
#include "mpi.h"

#define INTS_PER_PROC 1000000

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv); // Initialize MPI execution environment

    // Identify this process's rank, the world size, and the host's processor name:
    int rank, nprocs, namelen;
    char hostname[MPI_MAX_PROCESSOR_NAME];
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Get_processor_name(hostname, &namelen);

    int sender = 0;     // rank of the root process that scatters sendbuf

    // Unused by all except sender:
    //  sendbuf: INTS_PER_PROC elements, times # of processes:
    int *sendbuf = NULL;
    // Everybody needs:
    //  recvbuf: INTS_PER_PROC elements only:
    int *recvbuf = malloc(INTS_PER_PROC * sizeof(int));

    if (rank == sender) {
        sendbuf = malloc(INTS_PER_PROC * nprocs * sizeof(int));
        for (int i = 0; i < INTS_PER_PROC * nprocs; i++)
            sendbuf[i] = i;
    }
    // Non-root ranks pass sendbuf == NULL; MPI_Scatter reads it only at the root:
    MPI_Scatter(sendbuf, INTS_PER_PROC, MPI_INT,
                recvbuf, INTS_PER_PROC, MPI_INT,
                sender, MPI_COMM_WORLD);

    double sum = 0.0;
    for (int j = 0; j < INTS_PER_PROC; j++)
        sum += recvbuf[j];
    printf("%s/%02d:  recvbuf average == %lf\n",
            hostname, rank, sum/(double)INTS_PER_PROC);

    free(recvbuf);
    free(sendbuf);      // free(NULL) is a no-op on non-root ranks

    MPI_Finalize();     // Terminate MPI execution environment
    return 0;
}
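
To try this, here is a minimal build-and-run sketch, assuming OpenMPI's mpicc and mpirun are installed, the source is saved as scatter.c, and four processes run on a single host called node01 (the file and host names are illustrative):

$ mpicc -o scatter scatter.c
$ mpirun -np 4 ./scatter
node01/00:  recvbuf average == 499999.500000
node01/01:  recvbuf average == 1499999.500000
node01/02:  recvbuf average == 2499999.500000
node01/03:  recvbuf average == 3499999.500000

Rank p receives the values p*INTS_PER_PROC through (p+1)*INTS_PER_PROC - 1, so its average is p*1000000 + 499999.5; only the order of the four lines can vary between runs.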