snl_fei_BlkSizeMsgHandler.cpp
/*--------------------------------------------------------------------*/
/*    Copyright 2005 Sandia Corporation.                              */
/*    Under the terms of Contract DE-AC04-94AL85000, there is a       */
/*    non-exclusive license for use of this work by or on behalf      */
/*    of the U.S. Government. Export of this program may require      */
/*    a license from the United States Government.                    */
/*--------------------------------------------------------------------*/

#include <fei_macros.hpp>

#include <snl_fei_BlkSizeMsgHandler.hpp>

#include <fei_utils.hpp>

#include <snl_fei_Utils.hpp>
#include <fei_FieldMask.hpp>
#include <snl_fei_RecordCollection.hpp>
#include <fei_VectorSpace.hpp>
#include <fei_ParameterSet.hpp>
#include <fei_Graph.hpp>
#include <snl_fei_PointBlockMap.hpp>
#include <fei_TemplateUtils.hpp>

#include <fei_EqnBuffer.hpp>
#include <fei_EqnCommMgr.hpp>
#include <SNL_FEI_Structure.hpp>

#undef fei_file
#define fei_file "snl_fei_BlkSizeMsgHandler.cpp"
#include <fei_ErrMacros.hpp>
//----------------------------------------------------------------------------
snl_fei::BlkSizeMsgHandler::BlkSizeMsgHandler(fei::VectorSpace* vspace,
                                              fei::Graph* graph,
                                              MPI_Comm comm)
 : remote_colIndices_(NULL),
   local_colIndices_(NULL),
   vecSpace_(vspace),
   ptBlkMap_(NULL),
   graph_(graph),
   comm_(comm),
   sendProcs_(0, 64),
   recvProcs_(0, 64),
   firstExchange_(true)
{
  remote_colIndices_ = new fei::comm_map(0,1);
  local_colIndices_ = new fei::comm_map(0,1);

  ptBlkMap_ = vspace->getPointBlockMap();
}
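
// Note: fei::comm_map is a typedef for a snl_fei::RaggedTable keyed on
// processor rank, with a std::set<int> of indices per key. remote_colIndices_
// accumulates, per owning processor, the remotely-owned block-column indices
// whose sizes this processor still needs; local_colIndices_ is filled during
// round one of the exchange with the indices other processors request from us.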

//----------------------------------------------------------------------------
snl_fei::BlkSizeMsgHandler::~BlkSizeMsgHandler()
{
  delete remote_colIndices_;
  delete local_colIndices_;
}

//----------------------------------------------------------------------------
int snl_fei::BlkSizeMsgHandler::do_the_exchange()
{
  int local_proc = fei::localProc(comm_);
  if (fei::numProcs(comm_) < 2) {
    return(0);
  }

  fei::Graph::table_type* localgraph = graph_->getLocalGraph();
  fei::Graph::table_type::iterator
    g_iter = localgraph->begin(),
    g_end = localgraph->end();

  //First create a table that maps remote processors to column-indices from
  //our graph. These are remotely-owned column-indices for which we will need
  //block-sizes.

  for(; g_iter != g_end; ++g_iter) {
    fei::Graph::table_type::row_type* row = (*g_iter).second;

    fei::Graph::table_type::row_type::const_iterator
      iter = row->begin(),
      iter_end = row->end();

    int owner;

    for(; iter != iter_end; ++iter) {
      int col = *iter;
      owner = vecSpace_->getOwnerProcBlkIndex(col);

      if (owner != local_proc) {
        remote_colIndices_->addIndices(owner, 1, &col);
      }
    }
  }

  //Next, we need to send our lists of remotely-owned column-indices to the
  //owning processors. After that, those processors can respond by sending us
  //the sizes for those column-indices.
  fei::copyKeysToVector(remote_colIndices_->getMap(), sendProcs_);

  CHK_ERR( fei::mirrorProcs(comm_, sendProcs_, recvProcs_) );

  firstExchange_ = true;

  CHK_ERR( fei::exchange(comm_, this) );

  firstExchange_ = false;

  CHK_ERR( fei::exchange(comm_, this) );

  return(0);
}
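
// The two fei::exchange() calls above implement a request/reply protocol.
// Round one (firstExchange_ == true): each processor sends the block-column
// indices it needs sizes for to their owning processors, which store them in
// local_colIndices_. Round two: sendProcs_ and recvProcs_ swap roles (see
// getSendProcs/getRecvProcs below) and the owners reply with a
// (point-equation, block-size) pair for each requested index, which
// processRecvMessage() records in the PointBlockMap.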

//----------------------------------------------------------------------------
std::vector<int>& snl_fei::BlkSizeMsgHandler::getSendProcs()
{
  if (firstExchange_) {
    return(sendProcs_);
  }
  else {
    return(recvProcs_);
  }
}

//----------------------------------------------------------------------------
std::vector<int>& snl_fei::BlkSizeMsgHandler::getRecvProcs()
{
  if (firstExchange_) {
    return(recvProcs_);
  }
  else {
    return(sendProcs_);
  }
}

//----------------------------------------------------------------------------
int snl_fei::BlkSizeMsgHandler::getSendMessageLength(int destProc,
                                                     int& messageLength)
{
  if (firstExchange_) {
    fei::comm_map::row_type* cols = remote_colIndices_->getRow(destProc);
    messageLength = cols->size();
    return(0);
  }
  else {
    fei::comm_map::row_type* cols = local_colIndices_->getRow(destProc);
    messageLength = cols->size()*2;
    return(0);
  }
}

//----------------------------------------------------------------------------
int snl_fei::BlkSizeMsgHandler::getSendMessage(int destProc,
                                               std::vector<int>& message)
{
  if (firstExchange_) {
    fei::comm_map::row_type* cols = remote_colIndices_->getRow(destProc);
    message.resize(cols->size());
    fei::copySetToArray(*cols, message.size(), &message[0]);
    return(0);
  }
  else {
    fei::comm_map::row_type* cols = local_colIndices_->getRow(destProc);

    message.resize(cols->size()*2);

    fei::comm_map::row_type::const_iterator
      iter = cols->begin(),
      iter_end = cols->end();

    int offset = 0;
    for(; iter != iter_end; ++iter) {
      CHK_ERR( ptBlkMap_->getBlkEqnInfo(*iter,
                                        message[offset], message[offset+1]) );
      offset += 2;
    }

    return(0);
  }
}
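
// Note: as the receiving side in processRecvMessage() shows, getBlkEqnInfo()
// fills in, for each owned block-equation being requested, its first
// point-equation and its block-size. These two ints per index account for the
// doubled round-two message length reported by getSendMessageLength().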

//----------------------------------------------------------------------------
int snl_fei::BlkSizeMsgHandler::processRecvMessage(int srcProc,
                                                   std::vector<int>& message)
{
  if (firstExchange_) {
    for(unsigned i=0; i<message.size(); ++i) {
      local_colIndices_->addIndices(srcProc, 1, &(message[i]));
    }
  }
  else {
    fei::comm_map::row_type* cols = remote_colIndices_->getRow(srcProc);
    fei::comm_map::row_type::const_iterator
      iter = cols->begin(),
      iter_end = cols->end();

    int offset = 0;
    for(; iter != iter_end; ++iter) {
      int ptEqn = message[offset];
      int blkSize = message[offset+1];
      for(int i=0; i<blkSize; ++i) {
        CHK_ERR( ptBlkMap_->setEqn(ptEqn+i, *iter, blkSize) );
      }
      offset += 2;
    }
  }

  return(0);
}
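Typical usage, as a minimal sketch: BlkSizeMsgHandler is an internal FEI helper, so the driver function below (exchange_block_sizes) is hypothetical, but the construct-then-exchange sequence is what this file defines. do_the_exchange() is collective, so every processor in the communicator must call it.

  #include <snl_fei_BlkSizeMsgHandler.hpp>

  // Hypothetical driver; assumes vspace and graph are already fully
  // initialized and consistent across all processors in comm.
  int exchange_block_sizes(fei::VectorSpace* vspace,
                           fei::Graph* graph,
                           MPI_Comm comm)
  {
    snl_fei::BlkSizeMsgHandler handler(vspace, graph, comm);

    // Runs both exchange rounds; on return, the VectorSpace's PointBlockMap
    // holds block-sizes for the remotely-owned column-indices that appear in
    // the local graph. Returns 0 on success, nonzero on error.
    return handler.do_the_exchange();
  }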