Actual source code: mpi.c

  1: /*
  2:       This provides a few of the MPIUNI functions that cannot be implemented
  3:     with C macros
  4: */
  5: #include <petscsys.h>
  6: #if !defined(MPIUNI_H)
  7: #error "Wrong mpi.h included! require mpi.h from MPIUNI"
  8: #endif

 10: #include <petsc/private/petscimpl.h>

 12: #if defined(PETSC_HAVE_CUDA)
 13:   #include <cuda_runtime.h>
 14: #endif

 16: #if defined(PETSC_HAVE_HIP)
 17:   #include <hip/hip_runtime.h>
 18: #endif

 20: #define MPI_SUCCESS 0
 21: #define MPI_FAILURE 1

 23: void *MPIUNI_TMP = NULL;

 25: /*
 26:        With MPIUNI there are exactly four distinct communicators:
 27:     MPI_COMM_SELF, MPI_COMM_WORLD, and an MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)

 29:     MPI_COMM_SELF and MPI_COMM_WORLD are freed with MPI_Comm_free() in MPI_Finalize(), but in general with PETSc,
 30:      the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).

 32: */
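/*
   Illustrative sketch of the model described above (not part of this file): communicators are
   small integer handles, with MPI_COMM_NULL == 0, MPI_COMM_SELF == 1, MPI_COMM_WORLD == 2
   (the values noted in the comments below), and duplicates taking slots 3 and up:

     MPI_Comm dup;
     MPI_Comm_dup(MPI_COMM_WORLD,&dup);   // dup >= 3
     ...
     MPI_Comm_free(&dup);                 // should happen before MPI_Finalize() frees SELF/WORLD
*/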
 33: #define MAX_ATTR 256
 34: #define MAX_COMM 128

 36: typedef struct {
 37:   void *attribute_val;
 38:   int  active;
 39: } MPI_Attr;

 41: typedef struct {
 42:   void                *extra_state;
 43:   MPI_Delete_function *del;
 44:   int                 active;  /* Is this keyval in use by some comm? */
 45: } MPI_Attr_keyval;

 47: static MPI_Attr_keyval attr_keyval[MAX_ATTR];
 48: static MPI_Attr        attr[MAX_COMM][MAX_ATTR];
 49: static int             comm_active[MAX_COMM];  /* Boolean array indicating which comms are in use */
 50: static int             mpi_tag_ub = 100000000;
 51: static int             num_attr = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
 52: static int             MaxComm  = 2; /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
 53: static void*           MPIUNIF_mpi_in_place = 0;

 55: #define CommIdx(comm)  ((comm)-1)  /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */

 57: #if defined(__cplusplus)
 58: extern "C" {
 59: #endif

 61: /*
 62:    To avoid problems with the prototype for the system memcpy(), it is duplicated here
 63: */
 64: int MPIUNI_Memcpy(void *dst,const void *src,int n)
 65: {
 66:   if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
 67:   if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
 68:   if (!n) return MPI_SUCCESS;

 70:   /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
 71: #if defined(PETSC_HAVE_CUDA)
 72:   if (PetscCUDAInitialized) {cudaError_t cerr = cudaMemcpy(dst,src,n,cudaMemcpyDefault);if (cerr != cudaSuccess) return MPI_FAILURE;} else
 73: #elif defined(PETSC_HAVE_HIP)
 74:   if (PetscHIPInitialized)  {hipError_t  cerr = hipMemcpy(dst,src,n,hipMemcpyDefault);  if (cerr != hipSuccess)  return MPI_FAILURE;} else
 75: #endif
 76:   {memcpy(dst,src,n);}
 77:   return MPI_SUCCESS;
 78: }
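/*
   Usage sketch (illustrative; assumes the macro definitions in MPIUNI's mpi.h, which are not
   shown in this file): on a single process every reduction is just a copy, so a call such as

     double in = 2.0, out = 0.0;
     MPI_Allreduce(&in,&out,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);   // out becomes 2.0

   ultimately lands in MPIUNI_Memcpy() above; with CUDA or HIP initialized the buffers may be
   device pointers, handled by the cudaMemcpy()/hipMemcpy() branches.
*/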

 80: static int classcnt = 0;
 81: static int codecnt = 0;

 83: int MPI_Add_error_class(int *cl)
 84: {
 85:   *cl = classcnt++;
 86:   return MPI_SUCCESS;
 87: }

 89: int MPI_Add_error_code(int cl,int *co)
 90: {
 91:   if (cl >= classcnt) return MPI_FAILURE;
 92:   *co = codecnt++;
 93:   return MPI_SUCCESS;
 94: }

 96: int MPI_Type_get_envelope(MPI_Datatype datatype,int *num_integers,int *num_addresses,int *num_datatypes,int *combiner)
 97: {
 98:   int comb = datatype >> 28;
 99:   switch (comb) {
100:   case MPI_COMBINER_NAMED:
101:     *num_integers = 0;
102:     *num_addresses = 0;
103:     *num_datatypes = 0;
104:     *combiner = comb;
105:     break;
106:   case MPI_COMBINER_DUP:
107:     *num_integers = 0;
108:     *num_addresses = 0;
109:     *num_datatypes = 1;
110:     *combiner = comb;
111:     break;
112:   case MPI_COMBINER_CONTIGUOUS:
113:     *num_integers = 1;
114:     *num_addresses = 0;
115:     *num_datatypes = 1;
116:     *combiner = comb;
117:     break;
118:   default:
119:     return MPIUni_Abort(MPI_COMM_SELF,1);
120:   }
121:   return MPI_SUCCESS;
122: }

124: int MPI_Type_get_contents(MPI_Datatype datatype,int max_integers,int max_addresses,int max_datatypes,int *array_of_integers,MPI_Aint *array_of_addresses,MPI_Datatype *array_of_datatypes)
125: {
126:   int comb = datatype >> 28;
127:   switch (comb) {
128:   case MPI_COMBINER_NAMED:
129:     return MPIUni_Abort(MPI_COMM_SELF,1);
130:   case MPI_COMBINER_DUP:
131:     if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
132:     array_of_datatypes[0] = datatype & 0x0fffffff;
133:     break;
134:   case MPI_COMBINER_CONTIGUOUS:
135:     if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF,1);
136:     array_of_integers[0] = (datatype >> 8) & 0xfff; /* count */
137:     array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100;  /* basic named type (count=1) from which the contiguous type is derived */
138:     break;
139:   default:
140:     return MPIUni_Abort(MPI_COMM_SELF,1);
141:   }
142:   return MPI_SUCCESS;
143: }
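/*
   Datatype handle layout as inferred from the two routines above (an interpretation of this
   file, not a documented format): bits 28-31 carry the combiner, and for
   MPI_COMBINER_CONTIGUOUS bits 8-19 carry the count while the remaining bits identify the
   underlying named type. For example (assuming MPIUNI's mpi.h supplies MPI_Type_contiguous()):

     MPI_Datatype t;
     int ni,na,nd,comb;
     MPI_Type_contiguous(4,MPI_DOUBLE,&t);
     MPI_Type_get_envelope(t,&ni,&na,&nd,&comb);   // comb == MPI_COMBINER_CONTIGUOUS, ni == nd == 1
*/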

145: /*
146:    Used to set the built-in MPI_TAG_UB attribute
147: */
148: static int Keyval_setup(void)
149: {
150:   attr[CommIdx(MPI_COMM_WORLD)][0].active        = 1;
151:   attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
152:   attr[CommIdx(MPI_COMM_SELF)][0].active        = 1;
153:   attr[CommIdx(MPI_COMM_SELF)][0].attribute_val = &mpi_tag_ub;
154:   attr_keyval[0].active                          = 1;
155:   return MPI_SUCCESS;
156: }

158: int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
159: {
160:   int i,keyid;
161:   for (i=1; i<num_attr; i++) { /* the first attribute is always in use */
162:     if (!attr_keyval[i].active) {
163:       keyid = i;
164:       goto found;
165:     }
166:   }
167:   if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD,1);
168:   keyid = num_attr++;

170: found:
171:   attr_keyval[keyid].extra_state = extra_state;
172:   attr_keyval[keyid].del         = delete_fn;
173:   attr_keyval[keyid].active      = 1;
174:   *keyval                        = keyid;
175:   return MPI_SUCCESS;
176: }

178: int MPI_Comm_free_keyval(int *keyval)
179: {
180:   attr_keyval[*keyval].extra_state = 0;
181:   attr_keyval[*keyval].del         = 0;
182:   attr_keyval[*keyval].active      = 0;
183:   *keyval = 0;
184:   return MPI_SUCCESS;
185: }

187: int MPI_Comm_set_attr(MPI_Comm comm,int keyval,void *attribute_val)
188: {
189:   int idx = CommIdx(comm);
190:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
191:   attr[idx][keyval].active        = 1;
192:   attr[idx][keyval].attribute_val = attribute_val;
193:   return MPI_SUCCESS;
194: }

196: int MPI_Comm_delete_attr(MPI_Comm comm,int keyval)
197: {
198:   int idx = CommIdx(comm);
199:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
200:   if (attr[idx][keyval].active && attr_keyval[keyval].del) {
201:     void *save_attribute_val        = attr[idx][keyval].attribute_val;
202:     attr[idx][keyval].active        = 0;
203:     attr[idx][keyval].attribute_val = 0;
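    /* the slot is cleared before the callback runs, so a callback that touches this attribute sees it already deleted */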
204:     (*(attr_keyval[keyval].del))(comm,keyval,save_attribute_val,attr_keyval[keyval].extra_state);
205:   }
206:   return MPI_SUCCESS;
207: }

209: int MPI_Comm_get_attr(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
210: {
211:   int idx = CommIdx(comm);
212:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
213:   if (!keyval) Keyval_setup();
214:   *flag                  = attr[idx][keyval].active;
215:   *(void**)attribute_val = attr[idx][keyval].attribute_val;
216:   return MPI_SUCCESS;
217: }
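/*
   Attribute usage sketch (illustrative): the predefined MPI_TAG_UB attribute occupies keyval 0
   and is installed lazily by Keyval_setup(), assuming MPIUNI's mpi.h defines MPI_TAG_UB as 0:

     void *v;
     int  flag;
     MPI_Comm_get_attr(MPI_COMM_WORLD,MPI_TAG_UB,&v,&flag);
     // flag == 1 and *(int*)v == 100000000, i.e. mpi_tag_ub above
*/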

219: int MPI_Comm_create(MPI_Comm comm,MPI_Group group,MPI_Comm *newcomm)
220: {
221:   int j;
222:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
223:   for (j=3; j<=MaxComm; j++) {
224:     if (!comm_active[CommIdx(j)]) {
225:       comm_active[CommIdx(j)] = 1;
226:       *newcomm = j;
227:       return MPI_SUCCESS;
228:     }
229:   }
230:   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
231:   *newcomm = ++MaxComm;
232:   comm_active[CommIdx(*newcomm)] = 1;
233:   return MPI_SUCCESS;
234: }

236: int MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
237: {
238:   int j;
239:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
240:   for (j=3; j<=MaxComm; j++) {
241:     if (!comm_active[CommIdx(j)]) {
242:       comm_active[CommIdx(j)] = 1;
243:       *out = j;
244:       return MPI_SUCCESS;
245:     }
246:   }
247:   if (MaxComm >= MAX_COMM) return MPI_FAILURE;
248:   *out = ++MaxComm;
249:   comm_active[CommIdx(*out)] = 1;
250:   return MPI_SUCCESS;
251: }
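/*
   Slot-reuse sketch for MPI_Comm_create()/MPI_Comm_dup() above (illustrative): freed slots are
   recycled before MaxComm grows, so handles stay small integers.

     MPI_Comm a,b;
     MPI_Comm_dup(MPI_COMM_WORLD,&a);   // a == 3 on a fresh run (slots 1,2 are SELF,WORLD)
     MPI_Comm_free(&a);                 // slot 3 released, a set to MPI_COMM_NULL (0)
     MPI_Comm_dup(MPI_COMM_SELF,&b);    // b == 3 again: the freed slot is reused
*/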

253: int MPI_Comm_free(MPI_Comm *comm)
254: {
255:   int i;
256:   int idx = CommIdx(*comm);

258:   if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
259:   for (i=0; i<num_attr; i++) {
260:     if (attr[idx][i].active && attr_keyval[i].del) (*attr_keyval[i].del)(*comm,i,attr[idx][i].attribute_val,attr_keyval[i].extra_state);
261:     attr[idx][i].active        = 0;
262:     attr[idx][i].attribute_val = 0;
263:   }
264:   if (*comm >= 3) comm_active[idx] = 0;
265:   *comm = 0;
266:   return MPI_SUCCESS;
267: }

269: int MPI_Comm_size(MPI_Comm comm, int *size)
270: {
271:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
272:   *size=1;
273:   return MPI_SUCCESS;
274: }

276: int MPI_Comm_rank(MPI_Comm comm, int *rank)
277: {
278:   if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
279:   *rank=0;
280:   return MPI_SUCCESS;
281: }

283: int MPIUni_Abort(MPI_Comm comm,int errorcode)
284: {
285:   printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
286:   return MPI_FAILURE;
287: }

289: int MPI_Abort(MPI_Comm comm,int errorcode)
290: {
291:   abort();
292:   return MPI_SUCCESS;
293: }

295: /* --------------------------------------------------------------------------*/

297: static int MPI_was_initialized = 0;
298: static int MPI_was_finalized   = 0;

300: int MPI_Init(int *argc, char ***argv)
301: {
302:   if (MPI_was_initialized) return MPI_FAILURE;
 303:   /* The MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so a standard-compliant
 304:      MPIUNI would need the check 'if (MPI_was_finalized) return MPI_FAILURE;' here. We relax it to make life easier for users
 305:      of MPIUNI, so that they can call PetscInitialize()/PetscFinalize() multiple times.
 306:   */
307:   /* if (MPI_was_finalized) return MPI_FAILURE; */
308:   MPI_was_initialized = 1;
309:   MPI_was_finalized   = 0;
310:   return MPI_SUCCESS;
311: }

313: int MPI_Finalize(void)
314: {
315:   MPI_Comm comm;
316:   if (MPI_was_finalized) return MPI_FAILURE;
317:   if (!MPI_was_initialized) return MPI_FAILURE;
318:   comm = MPI_COMM_WORLD;
319:   MPI_Comm_free(&comm);
320:   comm = MPI_COMM_SELF;
321:   MPI_Comm_free(&comm);
322: #if defined(PETSC_USE_DEBUG)
323:   {
324:     int i;
325:     for (i=3; i<=MaxComm; i++) {
326:       if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
327:     }
328:   }
329: #endif
330:   /* reset counters */
331:   MaxComm  = 2;
332:   num_attr = 1;
333:   MPI_was_finalized   = 1;
334:   MPI_was_initialized = 0;
335:   PETSC_COMM_WORLD    = MPI_COMM_NULL;
336:   return MPI_SUCCESS;
337: }

339: int MPI_Initialized(int *flag)
340: {
341:   *flag = MPI_was_initialized;
342:   return MPI_SUCCESS;
343: }

345: int MPI_Finalized(int *flag)
346: {
347:   *flag = MPI_was_finalized;
348:   return MPI_SUCCESS;
349: }
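/*
   Lifecycle sketch (illustrative): because the finalized check in MPI_Init() is deliberately
   relaxed and MPI_Finalize() resets MPI_was_initialized, repeated cycles succeed under MPIUNI
   even though the MPI standard forbids re-initialization:

     MPI_Init(NULL,NULL);
     MPI_Finalize();
     MPI_Init(NULL,NULL);    // succeeds here; would be an error with a real MPI
     MPI_Finalize();
*/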

351: /* -------------------     Fortran versions of several routines ------------------ */

353: #if defined(PETSC_HAVE_FORTRAN_CAPS)
354: #define mpiunisetmoduleblock_          MPIUNISETMODULEBLOCK
355: #define mpiunisetfortranbasepointers_  MPIUNISETFORTRANBASEPOINTERS
356: #define petsc_mpi_init_                PETSC_MPI_INIT
357: #define petsc_mpi_finalize_            PETSC_MPI_FINALIZE
358: #define petsc_mpi_comm_size_           PETSC_MPI_COMM_SIZE
359: #define petsc_mpi_comm_rank_           PETSC_MPI_COMM_RANK
360: #define petsc_mpi_abort_               PETSC_MPI_ABORT
361: #define petsc_mpi_reduce_              PETSC_MPI_REDUCE
362: #define petsc_mpi_allreduce_           PETSC_MPI_ALLREDUCE
363: #define petsc_mpi_barrier_             PETSC_MPI_BARRIER
364: #define petsc_mpi_bcast_               PETSC_MPI_BCAST
365: #define petsc_mpi_gather_              PETSC_MPI_GATHER
366: #define petsc_mpi_allgather_           PETSC_MPI_ALLGATHER
367: #define petsc_mpi_comm_split_          PETSC_MPI_COMM_SPLIT
368: #define petsc_mpi_scan_                PETSC_MPI_SCAN
369: #define petsc_mpi_send_                PETSC_MPI_SEND
370: #define petsc_mpi_recv_                PETSC_MPI_RECV
371: #define petsc_mpi_reduce_scatter_      PETSC_MPI_REDUCE_SCATTER
372: #define petsc_mpi_irecv_               PETSC_MPI_IRECV
373: #define petsc_mpi_isend_               PETSC_MPI_ISEND
374: #define petsc_mpi_sendrecv_            PETSC_MPI_SENDRECV
375: #define petsc_mpi_test_                PETSC_MPI_TEST
376: #define petsc_mpi_waitall_             PETSC_MPI_WAITALL
377: #define petsc_mpi_waitany_             PETSC_MPI_WAITANY
378: #define petsc_mpi_allgatherv_          PETSC_MPI_ALLGATHERV
379: #define petsc_mpi_alltoallv_           PETSC_MPI_ALLTOALLV
380: #define petsc_mpi_comm_create_         PETSC_MPI_COMM_CREATE
381: #define petsc_mpi_address_             PETSC_MPI_ADDRESS
382: #define petsc_mpi_pack_                PETSC_MPI_PACK
383: #define petsc_mpi_unpack_              PETSC_MPI_UNPACK
384: #define petsc_mpi_pack_size_           PETSC_MPI_PACK_SIZE
385: #define petsc_mpi_type_struct_         PETSC_MPI_TYPE_STRUCT
386: #define petsc_mpi_type_commit_         PETSC_MPI_TYPE_COMMIT
387: #define petsc_mpi_wtime_               PETSC_MPI_WTIME
388: #define petsc_mpi_cancel_              PETSC_MPI_CANCEL
389: #define petsc_mpi_comm_dup_            PETSC_MPI_COMM_DUP
390: #define petsc_mpi_comm_free_           PETSC_MPI_COMM_FREE
391: #define petsc_mpi_get_count_           PETSC_MPI_GET_COUNT
392: #define petsc_mpi_get_processor_name_  PETSC_MPI_GET_PROCESSOR_NAME
393: #define petsc_mpi_initialized_         PETSC_MPI_INITIALIZED
394: #define petsc_mpi_iprobe_              PETSC_MPI_IPROBE
395: #define petsc_mpi_probe_               PETSC_MPI_PROBE
396: #define petsc_mpi_request_free_        PETSC_MPI_REQUEST_FREE
397: #define petsc_mpi_ssend_               PETSC_MPI_SSEND
398: #define petsc_mpi_wait_                PETSC_MPI_WAIT
399: #define petsc_mpi_comm_group_          PETSC_MPI_COMM_GROUP
400: #define petsc_mpi_exscan_              PETSC_MPI_EXSCAN
401: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
402: #define mpiunisetmoduleblock_          mpiunisetmoduleblock
403: #define mpiunisetfortranbasepointers_  mpiunisetfortranbasepointers
404: #define petsc_mpi_init_                petsc_mpi_init
405: #define petsc_mpi_finalize_            petsc_mpi_finalize
406: #define petsc_mpi_comm_size_           petsc_mpi_comm_size
407: #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank
408: #define petsc_mpi_abort_               petsc_mpi_abort
409: #define petsc_mpi_reduce_              petsc_mpi_reduce
410: #define petsc_mpi_allreduce_           petsc_mpi_allreduce
411: #define petsc_mpi_barrier_             petsc_mpi_barrier
412: #define petsc_mpi_bcast_               petsc_mpi_bcast
413: #define petsc_mpi_gather_              petsc_mpi_gather
414: #define petsc_mpi_allgather_           petsc_mpi_allgather
415: #define petsc_mpi_comm_split_          petsc_mpi_comm_split
416: #define petsc_mpi_scan_                petsc_mpi_scan
417: #define petsc_mpi_send_                petsc_mpi_send
418: #define petsc_mpi_recv_                petsc_mpi_recv
419: #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter
420: #define petsc_mpi_irecv_               petsc_mpi_irecv
421: #define petsc_mpi_isend_               petsc_mpi_isend
422: #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv
423: #define petsc_mpi_test_                petsc_mpi_test
424: #define petsc_mpi_waitall_             petsc_mpi_waitall
425: #define petsc_mpi_waitany_             petsc_mpi_waitany
426: #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv
427: #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv
428: #define petsc_mpi_comm_create_         petsc_mpi_comm_create
429: #define petsc_mpi_address_             petsc_mpi_address
430: #define petsc_mpi_pack_                petsc_mpi_pack
431: #define petsc_mpi_unpack_              petsc_mpi_unpack
432: #define petsc_mpi_pack_size_           petsc_mpi_pack_size
433: #define petsc_mpi_type_struct_         petsc_mpi_type_struct
434: #define petsc_mpi_type_commit_         petsc_mpi_type_commit
435: #define petsc_mpi_wtime_               petsc_mpi_wtime
436: #define petsc_mpi_cancel_              petsc_mpi_cancel
437: #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup
438: #define petsc_mpi_comm_free_           petsc_mpi_comm_free
439: #define petsc_mpi_get_count_           petsc_mpi_get_count
440: #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name
441: #define petsc_mpi_initialized_         petsc_mpi_initialized
442: #define petsc_mpi_iprobe_              petsc_mpi_iprobe
443: #define petsc_mpi_probe_               petsc_mpi_probe
444: #define petsc_mpi_request_free_        petsc_mpi_request_free
445: #define petsc_mpi_ssend_               petsc_mpi_ssend
446: #define petsc_mpi_wait_                petsc_mpi_wait
447: #define petsc_mpi_comm_group_          petsc_mpi_comm_group
448: #define petsc_mpi_exscan_              petsc_mpi_exscan
449: #endif

451: #if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
452: #define petsc_mpi_init_                petsc_mpi_init__
453: #define petsc_mpi_finalize_            petsc_mpi_finalize__
454: #define petsc_mpi_comm_size_           petsc_mpi_comm_size__
455: #define petsc_mpi_comm_rank_           petsc_mpi_comm_rank__
456: #define petsc_mpi_abort_               petsc_mpi_abort__
457: #define petsc_mpi_reduce_              petsc_mpi_reduce__
458: #define petsc_mpi_allreduce_           petsc_mpi_allreduce__
459: #define petsc_mpi_barrier_             petsc_mpi_barrier__
460: #define petsc_mpi_bcast_               petsc_mpi_bcast__
461: #define petsc_mpi_gather_              petsc_mpi_gather__
462: #define petsc_mpi_allgather_           petsc_mpi_allgather__
463: #define petsc_mpi_comm_split_          petsc_mpi_comm_split__
464: #define petsc_mpi_scan_                petsc_mpi_scan__
465: #define petsc_mpi_send_                petsc_mpi_send__
466: #define petsc_mpi_recv_                petsc_mpi_recv__
467: #define petsc_mpi_reduce_scatter_      petsc_mpi_reduce_scatter__
468: #define petsc_mpi_irecv_               petsc_mpi_irecv__
469: #define petsc_mpi_isend_               petsc_mpi_isend__
470: #define petsc_mpi_sendrecv_            petsc_mpi_sendrecv__
471: #define petsc_mpi_test_                petsc_mpi_test__
472: #define petsc_mpi_waitall_             petsc_mpi_waitall__
473: #define petsc_mpi_waitany_             petsc_mpi_waitany__
474: #define petsc_mpi_allgatherv_          petsc_mpi_allgatherv__
475: #define petsc_mpi_alltoallv_           petsc_mpi_alltoallv__
476: #define petsc_mpi_comm_create_         petsc_mpi_comm_create__
477: #define petsc_mpi_address_             petsc_mpi_address__
478: #define petsc_mpi_pack_                petsc_mpi_pack__
479: #define petsc_mpi_unpack_              petsc_mpi_unpack__
480: #define petsc_mpi_pack_size_           petsc_mpi_pack_size__
481: #define petsc_mpi_type_struct_         petsc_mpi_type_struct__
482: #define petsc_mpi_type_commit_         petsc_mpi_type_commit__
483: #define petsc_mpi_wtime_               petsc_mpi_wtime__
484: #define petsc_mpi_cancel_              petsc_mpi_cancel__
485: #define petsc_mpi_comm_dup_            petsc_mpi_comm_dup__
486: #define petsc_mpi_comm_free_           petsc_mpi_comm_free__
487: #define petsc_mpi_get_count_           petsc_mpi_get_count__
488: #define petsc_mpi_get_processor_name_  petsc_mpi_get_processor_name__
489: #define petsc_mpi_initialized_         petsc_mpi_initialized__
490: #define petsc_mpi_iprobe_              petsc_mpi_iprobe__
491: #define petsc_mpi_probe_               petsc_mpi_probe__
492: #define petsc_mpi_request_free_        petsc_mpi_request_free__
493: #define petsc_mpi_ssend_               petsc_mpi_ssend__
494: #define petsc_mpi_wait_                petsc_mpi_wait__
495: #define petsc_mpi_comm_group_          petsc_mpi_comm_group__
496: #define petsc_mpi_exscan_              petsc_mpi_exscan__
497: #endif
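/*
   Example of the name mangling handled above (assuming the usual compiler conventions): a
   Fortran reference to the MPIUNI routine petsc_mpi_comm_rank resolves, depending on the
   configure-time macros, to one of

     PETSC_MPI_COMM_RANK       PETSC_HAVE_FORTRAN_CAPS
     petsc_mpi_comm_rank       no trailing underscore
     petsc_mpi_comm_rank_      single trailing underscore (the spelling used below)
     petsc_mpi_comm_rank__     PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE
*/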

499: /* Do not build the Fortran interface if MPI namespace collisions are to be avoided */
500: #if defined(PETSC_HAVE_FORTRAN)

502: PETSC_EXTERN void mpiunisetmoduleblock_(void);

504: PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
505: {
506:   MPIUNIF_mpi_in_place   = f_mpi_in_place;
507: }

509: PETSC_EXTERN void petsc_mpi_init_(int *ierr)
510: {
511:   mpiunisetmoduleblock_();
512:   *ierr = MPI_Init((int*)0, (char***)0);
513: }

515: PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
516: {
517:   *ierr = MPI_Finalize();
518: }

520: PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
521: {
522:   *size = 1;
523:   *ierr = MPI_SUCCESS;
524: }

526: PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
527: {
528:   *rank = 0;
529:   *ierr = MPI_SUCCESS;
530: }

532: PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
533: {
534:   *newcomm = *comm;
535:   *ierr = MPI_SUCCESS;
536: }

538: PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
539: {
540:   abort();
541:   *ierr = MPI_SUCCESS;
542: }

544: PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *root,int *comm,int *ierr)
545: {
546:   *ierr = MPI_Reduce(sendbuf,recvbuf,*count,*datatype,*op,*root,*comm);
547: }

549: PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
550: {
551:   *ierr = MPI_Allreduce(sendbuf,recvbuf,*count,*datatype,*op,*comm);
552: }

554: PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm,int *ierr)
555: {
556:   *ierr = MPI_SUCCESS;
557: }

559: PETSC_EXTERN void petsc_mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
560: {
561:   *ierr = MPI_SUCCESS;
562: }

564: PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root,int *comm,int *ierr)
565: {
566:   *ierr = MPI_Gather(sendbuf,*scount,*sdatatype,recvbuf,*rcount,*rdatatype,*root,*comm);
567: }

569: PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void *recvbuf, int *rcount, int *rdatatype,int *comm,int *ierr)
570: {
571:   *ierr = MPI_Allgather(sendbuf,*scount,*sdatatype,recvbuf,*rcount,*rdatatype,*comm);
572: }

574: PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
575: {
576:   *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPI_sizeof(*datatype));
577: }

579: PETSC_EXTERN void petsc_mpi_send_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
580: {
581:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
582: }

584: PETSC_EXTERN void petsc_mpi_recv_(void *buf,int *count,int *datatype,int *source,int *tag,int *comm,int *status,int *ierr)
585: {
586:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
587: }

589: PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf,void *recvbuf,int *recvcounts,int *datatype,int *op,int *comm,int *ierr)
590: {
591:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
592: }

594: PETSC_EXTERN void petsc_mpi_irecv_(void *buf,int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
595: {
596:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
597: }

599: PETSC_EXTERN void petsc_mpi_isend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *request, int *ierr)
600: {
601:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
602: }

604: PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf,int *sendcount,int *sendtype,int *dest,int *sendtag,void *recvbuf,int *recvcount,int *recvtype,int *source,int *recvtag,int *comm,int *status,int *ierr)
605: {
606:   *ierr = MPIUNI_Memcpy(recvbuf,sendbuf,(*sendcount)*MPI_sizeof(*sendtype));
607: }

609: PETSC_EXTERN void petsc_mpi_test_(int *request,int *flag,int *status,int *ierr)
610: {
611:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
612: }

614: PETSC_EXTERN void petsc_mpi_waitall_(int *count,int *array_of_requests,int *array_of_statuses,int *ierr)
615: {
616:   *ierr = MPI_SUCCESS;
617: }

619: PETSC_EXTERN void petsc_mpi_waitany_(int *count,int *array_of_requests,int * index, int *status,int *ierr)
620: {
621:   *ierr = MPI_SUCCESS;
622: }

624: PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf,int *sendcount,int *sendtype,void *recvbuf,int *recvcounts,int *displs,int *recvtype,int *comm,int *ierr)
625: {
626:   *ierr = MPI_Allgatherv(sendbuf,*sendcount,*sendtype,recvbuf,recvcounts,displs,*recvtype,*comm);
627: }

629: PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf,int *sendcounts,int *sdispls,int *sendtype,void *recvbuf,int *recvcounts,int *rdispls,int *recvtype,int *comm,int *ierr)
630: {
631:   *ierr = MPI_Alltoallv(sendbuf,sendcounts,sdispls,*sendtype,recvbuf,recvcounts,rdispls,*recvtype,*comm);
632: }

634: PETSC_EXTERN void petsc_mpi_comm_create_(int *comm,int *group,int *newcomm,int *ierr)
635: {
636:   *newcomm =  *comm;
637:   *ierr = MPI_SUCCESS;
638: }

640: PETSC_EXTERN void petsc_mpi_address_(void *location,MPI_Aint *address,int *ierr)
641: {
642:   *address =  (MPI_Aint) ((char *)location);
643:   *ierr = MPI_SUCCESS;
644: }

646: PETSC_EXTERN void petsc_mpi_pack_(void *inbuf,int *incount,int *datatype,void *outbuf,int *outsize,int *position,int *comm,int *ierr)
647: {
648:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
649: }

651: PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf,int *insize,int *position,void *outbuf,int *outcount,int *datatype,int *comm,int *ierr)
652: {
653:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
654: }

656: PETSC_EXTERN void petsc_mpi_pack_size_(int *incount,int *datatype,int *comm,int *size,int *ierr)
657: {
658:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
659: }

661: PETSC_EXTERN void petsc_mpi_type_struct_(int *count,int *array_of_blocklengths,int *array_of_displacements,int *array_of_types,int *newtype,int *ierr)
662: {
663:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
664: }

666: PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype,int *ierr)
667: {
668:   *ierr = MPI_SUCCESS;
669: }

671: double petsc_mpi_wtime_(void)
672: {
673:   return 0.0;
674: }

676: PETSC_EXTERN void petsc_mpi_cancel_(int *request,int *ierr)
677: {
678:   *ierr = MPI_SUCCESS;
679: }

681: PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm,int *out,int *ierr)
682: {
683:   *out  = *comm;
684:   *ierr = MPI_SUCCESS;
685: }

687: PETSC_EXTERN void petsc_mpi_comm_free_(int *comm,int *ierr)
688: {
689:   *ierr = MPI_SUCCESS;
690: }

692: PETSC_EXTERN void petsc_mpi_get_count_(int *status,int *datatype,int *count,int *ierr)
693: {
694:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
695: }

697: PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name,int *result_len,int *ierr,PETSC_FORTRAN_CHARLEN_T len)
698: {
699:   MPIUNI_Memcpy(name,"localhost",9*sizeof(char));
700:   *result_len = 9;
701:   *ierr = MPI_SUCCESS;
702: }

704: PETSC_EXTERN void petsc_mpi_initialized_(int *flag,int *ierr)
705: {
706:   *flag = MPI_was_initialized;
707:   *ierr = MPI_SUCCESS;
708: }

710: PETSC_EXTERN void petsc_mpi_iprobe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
711: {
712:   *ierr = MPI_SUCCESS;
713: }

715: PETSC_EXTERN void petsc_mpi_probe_(int *source,int *tag,int *comm,int *flag,int *status,int *ierr)
716: {
717:   *ierr = MPI_SUCCESS;
718: }

720: PETSC_EXTERN void petsc_mpi_request_free_(int *request,int *ierr)
721: {
722:   *ierr = MPI_SUCCESS;
723: }

725: PETSC_EXTERN void petsc_mpi_ssend_(void *buf,int *count,int *datatype,int *dest,int *tag,int *comm,int *ierr)
726: {
727:   *ierr = MPIUni_Abort(MPI_COMM_WORLD,0);
728: }

730: PETSC_EXTERN void petsc_mpi_wait_(int *request,int *status,int *ierr)
731: {
732:   *ierr = MPI_SUCCESS;
733: }

735: PETSC_EXTERN void petsc_mpi_comm_group_(int *comm,int *group,int *ierr)
736: {
737:   *ierr = MPI_SUCCESS;
738: }

740: PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
741: {
742:   *ierr = MPI_SUCCESS;
743: }

745: #endif /* PETSC_HAVE_FORTRAN */

747: #if defined(__cplusplus)
748: }
749: #endif