Actual source code: mmdense.c


/*
   Support for the parallel dense matrix vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;

  /* Create local vector that is used to scatter into */
  VecDestroy(&mdn->lvec);
  if (mdn->A) {
    MatCreateVecs(mdn->A,&mdn->lvec,NULL);
    PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->lvec);
  }
  if (!mdn->Mvctx) {
    PetscLayoutSetUp(mat->cmap);
    PetscSFCreate(PetscObjectComm((PetscObject)mat),&mdn->Mvctx);
    PetscSFSetGraphWithPattern(mdn->Mvctx,mat->cmap,PETSCSF_PATTERN_ALLGATHER);
    PetscLogObjectParent((PetscObject)mat,(PetscObject)mdn->Mvctx);
  }
  return 0;
}
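
/*
   Note on use: the PetscSF created above with PETSCSF_PATTERN_ALLGATHER, together
   with the local work vector lvec, is what the parallel matrix-vector product uses
   to gather the entire distributed input vector onto every rank before applying the
   local dense block.  A rough sketch of that use, assuming xx is the distributed
   input vector and mdn the Mat_MPIDense context as above (the actual MatMult_MPIDense
   lives in mpidense.c, and in older PETSc versions PetscSFBcastBegin/End take no
   final MPI_Op argument):

     const PetscScalar *ax;
     PetscScalar       *ay;

     VecGetArrayRead(xx,&ax);
     VecGetArray(mdn->lvec,&ay);
     PetscSFBcastBegin(mdn->Mvctx,MPIU_SCALAR,ax,ay,MPI_REPLACE);
     PetscSFBcastEnd(mdn->Mvctx,MPIU_SCALAR,ax,ay,MPI_REPLACE);
     VecRestoreArray(mdn->lvec,&ay);
     VecRestoreArrayRead(xx,&ax);
     (then apply the local SeqDense block mdn->A to mdn->lvec)
*/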

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);

PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no;

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    PetscCalloc1(ismax+1,submat);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through the nstages */
  MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));
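  /* The reduction above (MPI_MAX) guarantees that every rank executes the same
     number of stages: MatCreateSubMatrices_MPIDense_Local() is collective on C, so
     ranks that have exhausted their own index sets still call it with max_no = 0. */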

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    MatCreateSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);
    pos += max_no;
  }
  return 0;
}
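
/*
   Rough usage sketch (hypothetical caller code): this routine is the MATMPIDENSE
   implementation behind the public MatCreateSubMatrices() interface, e.g.

     IS  rows,cols;
     Mat *submat;

     ISCreateStride(PETSC_COMM_SELF,5,0,1,&rows);   (first 5 global rows)
     ISCreateStride(PETSC_COMM_SELF,5,0,1,&cols);   (first 5 global columns)
     MatCreateSubMatrices(C,1,&rows,&cols,MAT_INITIAL_MATRIX,&submat);
     ...
     MatDestroySubMatrices(1,&submat);
     ISDestroy(&rows);
     ISDestroy(&cols);

   Each rank receives its own sequential dense submatrices for the index sets it
   passed in.
*/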
/* -------------------------------------------------------------------------*/
PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
  Mat            A  = c->A;
  Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
  PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
  PetscInt       N = C->cmap->N,rstart = C->rmap->rstart,count;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w1,*w3,*w4,*rtable,start;
  PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
  PetscInt       is_no,jmax,**rmap,*rmap_i;
  PetscInt       ctr_j,*sbuf1_j,*rbuf1_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
  MPI_Comm       comm;
  PetscScalar    **rbuf2,**sbuf2;
  PetscBool      sorted;

  PetscObjectGetComm((PetscObject)C,&comm);
  tag0 = ((PetscObject)C)->tag;
  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  m    = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  PetscObjectGetNewTag((PetscObject)C,&tag1);

  /* Check that the row and column indices are sorted */
  for (i=0; i<ismax; i++) {
    ISSorted(isrow[i],&sorted);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ISSorted(iscol[i],&sorted);
    if (!sorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
  }

  PetscMalloc5(ismax,(PetscInt***)&irow,ismax,(PetscInt***)&icol,ismax,&nrow,ismax,&ncol,m,&rtable);
  for (i=0; i<ismax; i++) {
    ISGetIndices(isrow[i],&irow[i]);
    ISGetIndices(iscol[i],&icol[i]);
    ISGetLocalSize(isrow[i],&nrow[i]);
    ISGetLocalSize(iscol[i],&ncol[i]);
  }

  /* Create hash table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap->range[i+1];
    for (; j<jmax; j++) rtable[j] = i;
  }
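  /* Example: with three ranks owning row ranges [0,4), [4,7) and [7,10), rtable is
     {0,0,0,0,1,1,1,2,2,2}, i.e. rtable[row] is the rank that owns global row 'row'. */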

  /* evaluate communication - mesg to who, length of mesg, and buffer space
     required. Based on this, buffers are allocated, and data copied into them */
  PetscMalloc3(2*size,&w1,size,&w3,size,&w4);
  PetscArrayzero(w1,size*2); /* initialize work vector */
  PetscArrayzero(w3,size);   /* initialize work vector */
  for (i=0; i<ismax; i++) {
    PetscArrayzero(w4,size); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[2*j] += w4[j];  w3[j]++;}
    }
  }

  nrqs       = 0;              /* no of outgoing messages */
  msz        = 0;              /* total mesg length (for all procs) */
  w1[2*rank] = 0;              /* no mesg sent to self */
  w3[rank]   = 0;
  for (i=0; i<size; i++) {
    if (w1[2*i])  { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
  }
  PetscMalloc1(nrqs+1,&pa); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[2*i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j        = pa[i];
    w1[2*j] += w1[2*j+1] + 2*w3[j];
    msz     += w1[2*j];
  }
  /* Do a global reduction to determine how many messages to expect */
  PetscMaxSum(comm,w1,&bsz,&nrqr);
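  /* PetscMaxSum() reduces the (length,flag) pairs in w1 across all ranks: bsz comes
     back as the maximum length of any message destined for this rank, and nrqr as
     the number of messages this rank will receive. */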

  /* Allocate memory for recv buffers. Make sure rbuf1[0] exists by adding 1 to the buffer length */
  PetscMalloc1(nrqr+1,&rbuf1);
  PetscMalloc1(nrqr*bsz,&rbuf1[0]);
  for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;

  /* Post the receives */
  PetscMalloc1(nrqr+1,&r_waits1);
  for (i=0; i<nrqr; ++i) {
    MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);
  }

  /* Allocate Memory for outgoing messages */
  PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);
  PetscArrayzero(sbuf1,size);
  PetscArrayzero(ptr,size);
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[2*j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    PetscArrayzero(sbuf1[j]+1,2*w3[j]);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    PetscArrayzero(ctr,size);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }
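  /* At this point sbuf1[j] is laid out as
       [nis, is_no_1, len_1, ..., is_no_nis, len_nis, rows for is_no_1, rows for is_no_2, ...]
     i.e. a count, then (IS number, row count) pairs, then the requested global row
     numbers grouped by index set. */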

  /* Now post the sends */
  PetscMalloc1(nrqs+1,&s_waits1);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);
  }

  /* Post receives to capture the row_data from other procs */
  PetscMalloc1(nrqs+1,&r_waits2);
  PetscMalloc1(nrqs+1,&rbuf2);
  for (i=0; i<nrqs; i++) {
    j     = pa[i];
    count = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
    PetscMalloc1(count+1,&rbuf2[i]);
    MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);
  }

  /* Receive the messages (row_nos) and then pack and send off the row values
     to the correct processors */

  PetscMalloc1(nrqr+1,&s_waits2);
  PetscMalloc1(nrqr+1,&r_status1);
  PetscMalloc1(nrqr+1,&sbuf2);

  {
    PetscScalar *sbuf2_i,*v_start;
    PetscInt    s_proc;
    for (i=0; i<nrqr; ++i) {
      MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);
      s_proc  = r_status1[i].MPI_SOURCE;         /* sending processor */
      rbuf1_i = rbuf1[idex];         /* Actual message from s_proc */
      /* no of rows = end - start; start indexes just past the header
         (2*rbuf1_i[0] + 1 entries), end is the length of the received buffer */
      start = 2*rbuf1_i[0] + 1;
      MPI_Get_count(r_status1+i,MPIU_INT,&end);
      /* allocate memory sufficient to hold all the row values */
      PetscMalloc1((end-start)*N,&sbuf2[idex]);
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (j=start; j<end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k=0; k<N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += a->lda;
        }
      }
      /* Now send off the data */
      MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);
    }
  }
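  /* The local block a->v is stored column-major with leading dimension a->lda, so
     consecutive entries of one row are a->lda apart in memory; the packing loop
     above walks with that stride and writes each requested row contiguously (N
     values per row) into sbuf2 before sending it back. */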
  /* End Send-Recv of IS + row_numbers */
  PetscFree(r_status1);
  PetscFree(r_waits1);
  PetscMalloc1(nrqs+1,&s_status1);
  if (nrqs) MPI_Waitall(nrqs,s_waits1,s_status1);
  PetscFree(s_status1);
  PetscFree(s_waits1);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqDense*)(submats[i]->data);
      PetscArrayzero(mat->v,submats[i]->rmap->n*submats[i]->cmap->n);

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i=0; i<ismax; i++) {
      MatCreate(PETSC_COMM_SELF,submats+i);
      MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);
      MatSetType(submats[i],((PetscObject)A)->type_name);
      MatSeqDenseSetPreallocation(submats[i],NULL);
    }
  }

  /* Assemble the matrices */
  {
    PetscInt    col;
    PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;

    for (i=0; i<ismax; i++) {
      mat    = (Mat_SeqDense*)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j=0; j<m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k=0; k<ncol[i]; k++) {
            col          = icol[i][k];
            imat_vi[k*m] = mat_vi[col*a->lda];
          }
        }
      }
    }
  }
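  /* Both the local block and the sequential submatrices are column-major: entry
     (j,k) of submats[i] is stored at imat_v[j + k*m] with m = nrow[i], and entry
     (row,col) of the local block at mat_v[row + col*a->lda], which is exactly what
     the assignment imat_vi[k*m] = mat_vi[col*a->lda] above copies. */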

  /* Create the row map: this maps a global row of C to the corresponding row of each submat */
  /* this is a very expensive operation wrt memory usage */
  PetscMalloc1(ismax,&rmap);
  PetscCalloc1(ismax*C->rmap->N,&rmap[0]);
  for (i=1; i<ismax; i++) rmap[i] = rmap[i-1] + C->rmap->N;
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }
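  /* Example: if isrow[0] selects global rows {2,7,9}, then rmap[0][2] = 0,
     rmap[0][7] = 1 and rmap[0][9] = 2; the remaining entries stay zero (from
     PetscCalloc1) and are never read. */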

  /* Now Receive the row_values and assemble the rest of the matrix */
  PetscMalloc1(nrqs+1,&r_status2);
  {
    PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
    PetscScalar *rbuf2_i,*imat_v,*imat_vi;

    for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
      MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);
      /* Now dig out the corresponding sbuf1, which contains the IS data_structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2*is_max+1;
      rbuf2_i = rbuf2[i];
      for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2*j-1];
        is_sz  = sbuf1_i[2*j];
        mat    = (Mat_SeqDense*)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k=0; k<is_sz; k++,rbuf2_i+=N) {  /* For each row */
          row     = sbuf1_i[ct1]; ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l=0; l<ncol[is_no]; l++) { /* For each col */
            col          = icol[is_no][l];
            imat_vi[l*m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  PetscFree(r_status2);
  PetscFree(r_waits2);
  PetscMalloc1(nrqr+1,&s_status2);
  if (nrqr) MPI_Waitall(nrqr,s_waits2,s_status2);
  PetscFree(s_status2);
  PetscFree(s_waits2);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ISRestoreIndices(isrow[i],irow+i);
    ISRestoreIndices(iscol[i],icol+i);
  }

  PetscFree5(*(PetscInt***)&irow,*(PetscInt***)&icol,nrow,ncol,rtable);
  PetscFree3(w1,w3,w4);
  PetscFree(pa);

  for (i=0; i<nrqs; ++i) {
    PetscFree(rbuf2[i]);
  }
  PetscFree(rbuf2);
  PetscFree4(sbuf1,ptr,tmp,ctr);
  PetscFree(rbuf1[0]);
  PetscFree(rbuf1);

  for (i=0; i<nrqr; ++i) {
    PetscFree(sbuf2[i]);
  }

  PetscFree(sbuf2);
  PetscFree(rmap[0]);
  PetscFree(rmap);

  for (i=0; i<ismax; i++) {
    MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
  }
  return 0;
}

PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
{
  Mat_MPIDense   *A = (Mat_MPIDense*)inA->data;

  MatScale(A->A,alpha);
  return 0;
}
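
/*
   Rough usage sketch (hypothetical caller code): scaling an MPIDENSE matrix is
   forwarded to its local sequential block, so from user code it is simply

     MatScale(B,2.0);   (B is a MATMPIDENSE matrix; every entry is multiplied by 2)

   which PETSc dispatches to MatScale_MPIDense() above.
*/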