OpenMPI — Collective communication

MPI_Reduce() example

In this program each process generates an array of random integers. They all then reduce the array down to the root process, choosing the minimum for each element. The root then displays each of the minima.

| Demo of MPI_Reduce().
| 2019-12-03
#include <stdio.h>
#include <time.h>   // time()
#include <stdlib.h> // atoi(), random()
#include <mpi.h>

int main(int argc, char **argv)
    if (argc != 2) {
        fprintf(stderr, "usage: %s <array_size>\n", argv[0]);
        return 1;
    int array_size = atoi(argv[1]);

    MPI_Init(&argc, &argv); // args not needed, but supplied anyway.
    int world_size, my_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    int name_len;
    char my_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Get_processor_name(my_name, &name_len);
    int root_proc = 0;

    // Everybody generates local data:
    int sendbuf[array_size];
    for (int i = 0; i < array_size; i++)
        sendbuf[i] = random() >> 20;

    int *recvbuf = NULL;
    // Only root needs a receive buffer:
    if (my_rank == root_proc)
        recvbuf = malloc(array_size * sizeof(int));

    MPI_Reduce(sendbuf, recvbuf, array_size, MPI_INT,
            MPI_MIN,    // find the global minima, one per element
            root_proc, MPI_COMM_WORLD);

    if (my_rank == root_proc)
        for (int i = 0; i < array_size; i++)
            printf("    %s/%02d - global minimum[%d]: %d\n",
                    my_name, my_rank, i, recvbuf[i]);

    return 0;