Parallel quicksort (MPI)

Discussion in 'C Programming' started by simo, Mar 7, 2007.

  1. simo

    simo Guest

    Hello everyone. I am trying to write a parallel sorting algorithm, namely
    PARALLEL QUICKSORT with REGULAR SAMPLING, and I am a beginner with both C
    and MPI. I believe the overall design is right, and the code I have written
    has no syntax errors; it simply refuses to work. I have been analyzing it
    for two days but I cannot find the bugs. Can any of you give me a hand? I am
    getting desperate. Thanks in advance. The code follows:

    #include <stdio.h>
    #include <stdlib.h>
    #include <mpi.h>
    #include "MyMPI.h"
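    /* The BLOCK_* macros used below come from the "MyMPI.h" header (from
       Quinn's MPI textbook, if I recall correctly). If you do not have that
       header, these are, I believe, the usual block-decomposition definitions
       (a sketch of what I assume they expand to, not copied from the book): */
    #ifndef BLOCK_LOW
    #define BLOCK_LOW(id,p,n)  ((id)*(n)/(p))
    #define BLOCK_HIGH(id,p,n) (BLOCK_LOW((id)+1,(p),(n))-1)
    #define BLOCK_SIZE(id,p,n) (BLOCK_HIGH((id),(p),(n))-BLOCK_LOW((id),(p),(n))+1)
    #endif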

    int main (int argc, char *argv[])
    {
    int i;
    int id; /* rank of this process */
    int p; /* number of processes */
    int n; /* number of elements in the array to sort */
    int high_value; /* highest array index owned by this process */
    int low_value; /* lowest array index owned by this process */
    int size; /* number of array elements stored by this process */
    int *vettore; /* this process's block of the array */
    void quicksort (int *a, int l, int r);

    MPI_Init(&argc,&argv);
    MPI_Comm_rank (MPI_COMM_WORLD, &id);
    MPI_Comm_size (MPI_COMM_WORLD, &p);

    /* Check that the size of the array to sort was given on the
    command line */
    if (argc != 2) {
    if (!id) printf ("Command line: %s <m>\n", argv[0]);
    MPI_Finalize();
    exit (1);
    }

    /* read the array size */
    n = strtol(argv[1],NULL,10);

    /* block decomposition of the array among the processes */
    low_value = BLOCK_LOW(id,p,n);
    high_value = BLOCK_HIGH(id,p,n);
    size = BLOCK_SIZE (id,p,n);

    /* allocate memory for the part of the array handled by this
    process */
    vettore = (int *) malloc (size * sizeof(int) );

    /* Fill the local block with pseudo-random values */
    srand (id+n);
    for (i=0; i<size; i++) vettore[i] = rand() % n;


    /* sort the local block with sequential quicksort */
    quicksort (vettore, 0, size-1);
    for (i=0; i<size; i++)
    printf (" proc %d sorted %d\n", id, vettore[i]);

    /* every process picks p regular samples from its sorted block and
    sends them to the root process */
    int local [p];
    for (i=0; i<p; i++)
    local[i] = vettore[i*size/p]; /* one sample every size/p elements */
    int sample [p*p];
    /* after the gather, process 0 holds all p*p regular samples */
    MPI_Gather (local, p, MPI_INT, sample, p, MPI_INT, 0, MPI_COMM_WORLD);

    if (!id){
    printf (" gathered samples: ");
    for (i=0; i<p*p; i++)
    printf ("%d ", sample[i]);
    printf ("\n");
    fflush(stdout);
    }

    int pivot [p-1];
    /* Process 0 sorts the regular samples and selects p-1 pivots */
    MPI_Barrier(MPI_COMM_WORLD);
    if (!id){
    quicksort (sample, 0, p*p-1);
    for (i=0; i<p*p; i++)
    printf (" sorted regular sample: %d \n", sample[i]);
    /* take one sample from the middle of each group of p as a pivot */
    for (i=1; i<p; i++)
    pivot[i-1] = sample [i*p+(p/2)-1];
    }
    /* The root process broadcasts the pivots to the other processes */
    MPI_Bcast (pivot, p-1, MPI_INT, 0, MPI_COMM_WORLD);

    /* Each process splits its sorted block into p pieces using the pivots:
    piece j holds the elements destined for process j */
    int inizio [p]; /* start index of piece j */
    int fine [p]; /* index one past the end of piece j */
    int j;
    inizio[0]=0;
    fine [p-1]=size;
    i=0;
    for (j=0; j<p-1; j++){
    /* advance to the first element greater than pivot[j]; since the block
    is already sorted, that is exactly where piece j ends */
    while (i<size && vettore[i] <= pivot[j])
    i++;
    fine [j] = i;
    inizio[j+1] = i;
    }
    /* Using all-to-all communication the processes exchange the pieces:
    process i collects piece number i from every process */
    int sendcnt [p]; /* how many elements this process sends to each process */
    int start_rec [p]; /* start index (on the sender) of each incoming piece */
    int fine_rec [p]; /* end index (exclusive) of each incoming piece */
    int cnt [p]; /* how many elements arrive from each process */
    int rec_disp [p]; /* where each incoming piece starts in 'ordinato' */
    int k=0; /* total number of elements this process will receive */

    MPI_Alltoall (inizio, 1, MPI_INT, start_rec, 1, MPI_INT,
    MPI_COMM_WORLD);
    MPI_Alltoall (fine, 1, MPI_INT, fine_rec, 1, MPI_INT,
    MPI_COMM_WORLD);

    for (i=0; i<p; i++) {
    sendcnt[i] = fine[i] - inizio[i]; /* 'fine' is exclusive, so no +1 */
    cnt[i] = fine_rec[i] - start_rec[i];
    k += cnt[i];
    }
    int ordinato [k];
    rec_disp[0] = 0;
    for (i=1; i<p; i++) rec_disp[i] = rec_disp[i-1] + cnt[i-1];
    /* send counts/offsets come from the local split; receive counts/offsets
    were derived above from the exchanged boundaries */
    MPI_Alltoallv (vettore, sendcnt, inizio, MPI_INT, ordinato, cnt,
    rec_disp, MPI_INT, MPI_COMM_WORLD);

    /* each process sorts the piece it received */
    quicksort (ordinato, 0,k-1);

    /* Gather the sorted pieces on the root process; concatenated in rank
    order they form the fully sorted array */
    int finale [n];
    int final_cnt [p]; /* how many elements each process contributes */
    int final_disp [p]; /* where each contribution starts in 'finale' */
    MPI_Gather (&k, 1, MPI_INT, final_cnt, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (!id) {
    final_disp[0] = 0;
    for (i=1; i<p; i++) final_disp[i] = final_disp[i-1] + final_cnt[i-1];
    }
    MPI_Gatherv (ordinato, k, MPI_INT, finale, final_cnt, final_disp,
    MPI_INT, 0, MPI_COMM_WORLD);
    if (!id){
    printf ("Sorted array: ");
    for (i=0; i<n; i++)
    printf ("%d, ", finale[i]);
    printf ("\n");
    }
    MPI_Finalize();
    return 0;
    }

    int partition (int a[], int l, int r); /* used by quicksort below */

    void quicksort(int *a, int l, int r)
    {
    int q;
    if (r>l) {
    q=partition(a,l,r);
    quicksort(a,l,q-1);
    quicksort(a,q+1,r);
    }
    }
    int partition(int a[], int l, int r)
    {
    int v, i, j, t;
    v=a[r];
    i=l-1;
    j=r;
    for (;;)
    {
    while (a[++i]<v && i < r);
    while ((j>l) && (a[--j]>v)); /* do not scan past the left end */
    if (i>=j) break;
    t=a[i];
    a[i]=a[j];
    a[j]=t;
    }
    t=a[i];
    a[i]=a[r];
    a[r]=t;
    return i;
    }
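
    (Side note: the call that seems easiest to get wrong here is
    MPI_Alltoallv, so below is a tiny standalone sketch -- separate from the
    program above, with toy values only -- of how its count and displacement
    arrays are meant to be filled in: sendcounts[i] elements, starting
    sdispls[i] into the send buffer, go to rank i, and recvcounts[i] /
    rdispls[i] say how much arrives from rank i and where it lands.)

    #include <stdio.h>
    #include <mpi.h>

    int main (int argc, char *argv[])
    {
    int id, p, i;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank (MPI_COMM_WORLD, &id);
    MPI_Comm_size (MPI_COMM_WORLD, &p);

    /* every rank sends one element (its own rank number) to every rank */
    int sendbuf[p], sendcnt[p], sdispl[p];
    int recvbuf[p], recvcnt[p], rdispl[p];
    for (i=0; i<p; i++) {
    sendbuf[i] = id; /* the element destined for rank i */
    sendcnt[i] = 1; /* one element per destination */
    sdispl[i] = i; /* it sits at offset i in sendbuf */
    recvcnt[i] = 1; /* expect one element from each rank */
    rdispl[i] = i; /* store rank i's element at offset i */
    }
    MPI_Alltoallv (sendbuf, sendcnt, sdispl, MPI_INT,
    recvbuf, recvcnt, rdispl, MPI_INT, MPI_COMM_WORLD);

    /* after the exchange every rank holds 0, 1, ..., p-1 */
    printf ("rank %d received:", id);
    for (i=0; i<p; i++) printf (" %d", recvbuf[i]);
    printf ("\n");
    MPI_Finalize();
    return 0;
    }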
     
    simo, Mar 7, 2007
    #1

  2. santosh

    santosh Guest

    simo wrote:
    > Hello everyone. I am trying to write a parallel sorting algorithm,
    > namely PARALLEL QUICKSORT with REGULAR SAMPLING, and I am a beginner
    > with both C and MPI. I believe the overall design is right, and the
    > code I have written has no syntax errors; it simply refuses to work. I
    > have been analyzing it for two days but I cannot find the bugs. Can any
    > of you give me a hand? I am getting desperate. Thanks in advance. The
    > code follows:
    >
    > #include <stdio.h>
    > #include <stdlib.h>
    > #include <mpi.h>
    > #include "MyMPI.h"


    <snip>

    This group deals only with ISO C, which has no concept of multiple
    processes, so if your problem is in the code responsible for
    parallelism, then you might get a better response from posting to a
    group like comp.programming.threads or a group or list for MPI.

    Also try to reduce your code to the smallest possible compilable
    subset that still exhibits the problem. Most importantly, try to
    format your code better. Currently it's close to unreadable.
     
    santosh, Mar 7, 2007
    #2

  3. user923005

    user923005 Guest

    He has found news:comp.parallel.mpi, so his MPI queries are quite
    on-topic there, and I am sure he will find good assistance.
     
    user923005, Mar 7, 2007
    #3
