OpenMPI — Collective communication

MPI_Bcast() example

This program generates an array of random integers (on the root process only), then broadcasts the array to all processes. Each process then calculates the average of the array and displays it. Since every process received the same array, they all calculate the same average.

Although the order in which the processes print is nondeterministic, every process prints the same average, so the only run-to-run variability you will see is the order of the rank labels.
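
For reference, the prototype of MPI_Bcast() as declared in <mpi.h>:

int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
              int root, MPI_Comm comm);

The call is collective: every rank in the communicator must execute it. On the root, buffer holds the data to send; on every other rank, buffer is overwritten with the root's copy.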

/*
| Demo: MPI_Bcast().
| 2019-11-30
*/
#include <stdio.h>
#include <time.h>   // time()
#include <stdlib.h> // random()
#include <mpi.h>

#define ARYSIZE 1000

int main(int argc, char **argv)
{
    //------------------------------------------------------------------

    MPI_Init(&argc, &argv); // args not needed, but supplied anyway.
    int world_size, my_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    int name_len;
    char my_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Get_processor_name(my_name, &name_len);

    int root_proc = 0;

    int buffer[ARYSIZE];
    if (my_rank == root_proc) {
        // Only the root fills the buffer; the other ranks' copies stay
        // uninitialized until the broadcast below overwrites them.
        srandom(time(NULL));            // srandom()/random() are POSIX.
        for (int i = 0; i < ARYSIZE; i++)
            buffer[i] = random() >> 10; // Shift to keep the values small.
    }
    // Collective call: every rank executes it. Root sends; the rest receive.
    MPI_Bcast(buffer, ARYSIZE, MPI_INT, root_proc, MPI_COMM_WORLD);

    // Every rank now holds identical data, so every rank computes the
    // same average.
    double average = 0.0;
    for (int i = 0; i < ARYSIZE; i++)
        average += buffer[i];
    average /= (double)ARYSIZE;
    printf("%s:%02d - average: %f\n", my_name, my_rank, average);

    MPI_Finalize();
    return 0;
}
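
To build and run under an OpenMPI installation, use the standard wrapper compiler and launcher, mpicc and mpirun (bcast_demo.c is a placeholder file name):

mpicc bcast_demo.c -o bcast_demo
mpirun -np 4 ./bcast_demo

Each of the 4 processes should print the same average, one line per rank.

For contrast, here is a minimal sketch of the same broadcast written with point-to-point calls. naive_bcast() is a hypothetical helper for illustration, not an MPI routine; a library's MPI_Bcast() is typically implemented with a tree-structured algorithm, so it scales far better than this linear loop of sends.

#include <mpi.h>

/*
| Naive "broadcast": the root sends one copy to each other rank.
| Illustration only; prefer MPI_Bcast().
*/
static void naive_bcast(int *buf, int count, int root, MPI_Comm comm)
{
    int size, rank;
    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

    if (rank == root) {
        // The root sends size-1 separate messages: O(size) work on one rank.
        for (int dest = 0; dest < size; dest++)
            if (dest != root)
                MPI_Send(buf, count, MPI_INT, dest, 0, comm);
    } else {
        // Every non-root rank receives exactly one message from the root.
        MPI_Recv(buf, count, MPI_INT, root, 0, comm, MPI_STATUS_IGNORE);
    }
}

Calling naive_bcast(buffer, ARYSIZE, root_proc, MPI_COMM_WORLD) in place of the MPI_Bcast() line in the program above would produce the same output.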