mpi-forum / mpi-forum-historic

Migration of old MPI Forum Trac Tickets to GitHub. New issues belong on mpi-forum/mpi-issues.
http://www.mpi-forum.org

Correcting examples in topology chapter #335

Open mpiforumbot opened 8 years ago

mpiforumbot commented 8 years ago

Originally by gropp on 2012-03-07 09:43:17 -0600


I ran the code checker over the current version of the MPI-3 standard. Attached are my recommended fixes for the topology chapter. This should be handled by the chapter committee; it is filed as a ticket only to capture the issue.

Bill

Index: chap-topol/topol.tex
===================================================================
--- chap-topol/topol.tex    (revision 1116)
+++ chap-topol/topol.tex    (working copy)
@@ -802,7 +802,7 @@
 sources[0] = rank;
 degrees[0] = 8;
 MPI_Dist_graph_create(MPI_COMM_WORLD, 1, sources, degrees, destinations,
-                      weights, MPI_INFO_NULL, 1, comm_dist_graph)
+                      weights, MPI_INFO_NULL, 1, &comm_dist_graph);
 \end{verbatim}
 \end{example}

@@ -1567,7 +1567,8 @@
 MPI_Dist_graph_neighbors_count(comm,&indegree,&outdegree,&weighted);
 int *srcs=(int*)malloc(indegree*sizeof(int));
 int *dsts=(int*)malloc(outdegree*sizeof(int));
-MPI_Dist_graph_neighbors(comm,indegree,srcs,outdegree,dsts,MPI_UNWEIGHTED);
+MPI_Dist_graph_neighbors(comm,indegree,srcs,MPI_UNWEIGHTED,
+                         outdegree,dsts,MPI_UNWEIGHTED);
 int k,l;

 for(k=0; k<outdegree; ++k) 
@@ -1577,7 +1578,7 @@
   MPI_Irecv(recvbuf+l*recvcount*extent(recvtype),recvcount,recvtype,
             srcs[l],...); 

-MPI_Waitall(...)
+MPI_Waitall(...);
 \end{verbatim}

 Figure~\ref{fig:sparsegather} shows the neighborhood gather
@@ -1660,7 +1661,8 @@
 MPI_Dist_graph_neighbors_count(comm,&indegree,&outdegree,&weighted);
 int *srcs=(int*)malloc(indegree*sizeof(int));
 int *dsts=(int*)malloc(outdegree*sizeof(int));
-MPI_Dist_graph_neighbors(comm,indegree,srcs,outdegree,dsts,MPI_UNWEIGHTED);
+MPI_Dist_graph_neighbors(comm,indegree,srcs,MPI_UNWEIGHTED,
+                         outdegree,dsts,MPI_UNWEIGHTED);
 int k,l;

 for(k=0; k<outdegree; ++k) 
@@ -1670,13 +1672,13 @@
   MPI_Irecv(recvbuf+displs[l]*extent(recvtype),recvcounts[l],recvtype,
             srcs[l],...); 

-MPI_Waitall(...)
+MPI_Waitall(...);
 \end{verbatim}

 The type signature associated with \mpiarg{sendcount, sendtype}, at
 process {\tt j} must be equal to the type signature associated with
 \mpiarg{recvcounts}{\tt [l],} \mpiarg{recvtype} at any other process
-with {\tt srcs[l]==j}.
+
 This implies that the amount of data sent must be equal to the
 amount of data received, pairwise between every pair of 
 communicating processes.
@@ -1764,7 +1766,8 @@
 MPI_Dist_graph_neighbors_count(comm,&indegree,&outdegree,&weighted);
 int *srcs=(int*)malloc(indegree*sizeof(int));
 int *dsts=(int*)malloc(outdegree*sizeof(int));
-MPI_Dist_graph_neighbors(comm,indegree,srcs,outdegree,dsts,MPI_UNWEIGHTED);
+MPI_Dist_graph_neighbors(comm,indegree,srcs,MPI_UNWEIGHTED,
+                         outdegree,dsts,MPI_UNWEIGHTED);
 int k,l;

 for(k=0; k<outdegree; ++k)
@@ -1775,7 +1778,7 @@
   MPI_Irecv(recvbuf+l*recvcount*extent(recvtype),recvcount,recvtype,
             srcs[l],...); 

-MPI_Waitall(...)
+MPI_Waitall(...);
 \end{verbatim}

 The type signature associated with \mpiarg{sendcount, sendtype},
@@ -1844,7 +1847,8 @@
 MPI_Dist_graph_neighbors_count(comm,&indegree,&outdegree,&weighted);
 int *srcs=(int*)malloc(indegree*sizeof(int));
 int *dsts=(int*)malloc(outdegree*sizeof(int));
-MPI_Dist_graph_neighbors(comm,indegree,srcs,outdegree,dsts,MPI_UNWEIGHTED);
+MPI_Dist_graph_neighbors(comm,indegree,srcs,MPI_UNWEIGHTED,
+                         outdegree,dsts,MPI_UNWEIGHTED);
 int k,l;

 for(k=0; k<outdegree; ++k) 
@@ -1855,7 +1859,7 @@
   MPI_Irecv(recvbuf+rdispls[l]*extent(recvtype),recvcounts[l],recvtype,
             srcs[l],...); 

-MPI_Waitall(...)
+MPI_Waitall(...);
 \end{verbatim}

 The type signature associated with
@@ -1930,7 +1934,8 @@
 MPI_Dist_graph_neighbors_count(comm,&indegree,&outdegree,&weighted);
 int *srcs=(int*)malloc(indegree*sizeof(int));
 int *dsts=(int*)malloc(outdegree*sizeof(int));
-MPI_Dist_graph_neighbors(comm,indegree,srcs,outdegree,dsts,MPI_UNWEIGHTED);
+MPI_Dist_graph_neighbors(comm,indegree,srcs,MPI_UNWEIGHTED,
+                         outdegree,dsts,MPI_UNWEIGHTED);
 int k,l;

 for(k=0; k<outdegree; ++k) 
@@ -1939,7 +1944,7 @@
 for(l=0; l<indegree; ++l) 
   MPI_Irecv(recvbuf+rdispls[l],recvcounts[l], recvtypes[l],srcs[l],...); 

-MPI_Waitall(...)
+MPI_Waitall(...);
 \end{verbatim}

 The type signature associated with
@@ -2343,18 +2348,18 @@
 \MPIupdateBegin{3.0}{258}
 \begin{figure}
 %............................................................................
+% \begin{Verbatim}[commandchars=\\\$\^,commentchar=\%]
 %%HEADER
 %%LANG: FORTRAN90
 %%SKIPELIPSIS
 %%FRAGMENT
 %%ENDHEADER
-% \begin{Verbatim}[commandchars=\\\$\^,commentchar=\%]
 \begin{verbatim}
 INTEGER ndims, num_neigh
 LOGICAL reorder
 PARAMETER (ndims=2, num_neigh=4, reorder=.true.)
 INTEGER comm, comm_cart, dims(ndims), ierr
-INTEGER neigh_rank(num_neigh), own_coords(ndims), i, j
+INTEGER neigh_rank(num_neigh), own_coords(ndims), i, j, it
 LOGICAL periods(ndims)
 REAL u(0:101,0:101), f(0:101,0:101)
 DATA dims / ndims * 0 /
@@ -2364,7 +2369,7 @@
 periods(1) = .TRUE.
 periods(2) = .TRUE.
 !   Create a grid structure in WORLD group and inquire about own position
-CALL MPI_CART_CREATE (comm, ndims, dims, periods, reorder,
+CALL MPI_CART_CREATE (comm, ndims, dims, periods, reorder, &
                   comm_cart,ierr)
 CALL MPI_CART_GET (comm_cart, ndims, dims, periods, own_coords,ierr)
 i = own_coords(1)
@@ -2435,6 +2440,7 @@
 %%ENDHEADER
 \begin{Verbatim}[commandchars=\\\$\^,commentchar=\%]
 SUBROUTINE exchange (u, comm_cart, neigh_rank, num_neigh)
+USE MPI
 REAL u(0:101,0:101)
 INTEGER comm_cart, num_neigh, neigh_rank(num_neigh)
 INTEGER sndcounts(num_neigh), sdispls(num_neigh), sndtypes(num_neigh)
@@ -2443,7 +2449,7 @@
 INTEGER type_vec, i, ierr 
 !   The following initialization need to be done only once
 !   before the first call of exchange.
-CALL MPI_TYPE_EXTENT(MPI_REAL, lb, sizeofreal, ierr) 
+CALL MPI_TYPE_GET_EXTENT(MPI_REAL, lb, sizeofreal, ierr) 
 CALL MPI_TYPE_VECTOR (100, 1, 102, MPI_REAL, type_vec, ierr) 
 CALL MPI_TYPE_COMMIT (type_vec, ierr) 
 sndtypes(1) = type_vec
@@ -2465,7 +2471,7 @@
 rdispls(4) = (  1 + 101*102) * sizeofreal   ! first element of u(1:100,101)

 ! the following communication has to be done in each call of exchange 
-CALL MPI_NEIGHBOR_ALLTOALLW (u, sndcounts, sdispls, sndtypes,
+CALL MPI_NEIGHBOR_ALLTOALLW (u, sndcounts, sdispls, sndtypes, &
                            u, rcvcounts, rdispls, rcvtypes, comm_cart, ierr) 

 !   The following finalizing need to be done only once
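
[Editor's note] The recurring C fixes in the patch above boil down to the calling pattern below. This is a minimal standalone sketch, not the standard's example: the single-self-edge topology and the use of MPI_UNWEIGHTED throughout are placeholder choices made up for illustration. The points being demonstrated are the &comm_dist_graph output argument (plus trailing semicolon) in MPI_Dist_graph_create, and the full seven-argument MPI_Dist_graph_neighbors signature, which takes a weights argument after each of the sources and destinations arrays.

#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm comm_dist_graph;
    int rank, indegree, outdegree, weighted;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Hypothetical adjacency for illustration only: each process declares
       itself as the single source, with one outgoing edge back to itself. */
    int sources[1]      = { rank };
    int degrees[1]      = { 1 };
    int destinations[1] = { rank };

    /* Corrected call: MPI_UNWEIGHTED as the weights array, and the new
       communicator passed by address, with a terminating semicolon. */
    MPI_Dist_graph_create(MPI_COMM_WORLD, 1, sources, degrees, destinations,
                          MPI_UNWEIGHTED, MPI_INFO_NULL, 1, &comm_dist_graph);

    /* Query the sizes of the local neighborhood, then retrieve it with the
       seven-argument MPI_Dist_graph_neighbors signature: a (possibly
       MPI_UNWEIGHTED) weights argument follows each neighbor array. */
    MPI_Dist_graph_neighbors_count(comm_dist_graph, &indegree, &outdegree,
                                   &weighted);
    int *srcs = (int *) malloc(indegree * sizeof(int));
    int *dsts = (int *) malloc(outdegree * sizeof(int));
    MPI_Dist_graph_neighbors(comm_dist_graph, indegree, srcs, MPI_UNWEIGHTED,
                             outdegree, dsts, MPI_UNWEIGHTED);

    free(srcs);
    free(dsts);
    MPI_Comm_free(&comm_dist_graph);
    MPI_Finalize();
    return 0;
}

The same corrected MPI_Dist_graph_neighbors pattern applies unchanged to all five of the neighborhood-collective hunks above.
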
mpiforumbot commented 8 years ago

Originally by htor on 2012-03-20 11:38:50 -0500


Thanks Bill! All examples in this chapter are fixed. As suggested, I didn't use the messy change macros for these simple, clear changes. The chapter is attached for review by the chapter committee; please attach reviews to the ticket as comments!

mpiforumbot commented 8 years ago

Originally by htor on 2012-03-20 11:42:08 -0500


Attachment added: topol.pdf (439.9 KiB)

mpiforumbot commented 8 years ago

Originally by goodell on 2012-03-20 14:43:57 -0500


You skipped Bill's recommended change:

 \mpiarg{recvcounts}{\tt [l],} \mpiarg{recvtype} at any other process
-with {\tt srcs[l]==j}.
+

But I think it was correct to skip it :)

Otherwise it looks like you correctly implemented the suggested changes in the attached PDF. Furthermore, all of those changes made sense to me.

mpiforumbot commented 8 years ago

Originally by RolfRabenseifner on 2012-07-14 08:32:29 -0500


Review: everything is implemented in the approved SVN. The ticket can be closed with TEXT COMMITTED.