Amesos Package Browser (Single Doxygen Collection) Development
example_AmesosFactory_HB.cpp

// @HEADER
// ***********************************************************************
//
// Amesos: An Interface to Direct Solvers
// Copyright (2004) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; either version 2.1 of the
// License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
// USA
// Questions? Contact Michael A. Heroux (maherou@sandia.gov)
//
// ***********************************************************************
// @HEADER

#include "Amesos_ConfigDefs.h"

// This example needs Galeri to generate the linear system.
// You must have configured Trilinos with --enable-galeri
// in order to compile this example

#ifdef HAVE_MPI
#include "mpi.h"
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif
#include "Amesos.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_Import.h"
#include "Epetra_Export.h"
#include "Epetra_Map.h"
#include "Epetra_MultiVector.h"
#include "Epetra_Vector.h"
#include "Epetra_LinearProblem.h"
#include "Galeri_ReadHB.h"

// ==================== //
// M A I N D R I V E R //
// ==================== //
//
// This example will:
// 1.- read an H/B matrix from file;
// 2.- redistribute the linear system matrix to the
//     available processes;
// 3.- set up LHS/RHS if not present;
// 4.- create an Amesos_BaseSolver object;
// 5.- solve the linear problem.
//
// This example can be run with any number of processors.
//
// Author: Marzio Sala, ETHZ/COLAB
// Last modified: Oct-05
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  std::string matrix_file;
  std::string solver_type;

  if (argc > 1)
    matrix_file = argv[1]; // read it from the command line
  else
    matrix_file = "662_bus.rsa"; // file containing the HB matrix.

  if (argc > 2)
    solver_type = argv[2]; // read it from the command line
  else
    solver_type = "Klu"; // default

  if (Comm.MyPID() == 0)
    std::cout << "Reading matrix `" << matrix_file << "'" << std::endl;

  // ================= //
  // reading HB matrix //
  // ================= //

  // HB files contain serial matrices. Hence, only
  // process 0 reads the matrix (and, if present, the
  // solution and RHS). The matrix is then redistributed
  // using Epetra capabilities.
  // All variables that begin with "read" refer to the
  // HB data held by process 0.
  Epetra_Map* readMap;
  Epetra_CrsMatrix* readA;
  Epetra_Vector* readx;
  Epetra_Vector* readb;
  Epetra_Vector* readxexact;
  try
  {
    Galeri::ReadHB(matrix_file.c_str(), Comm, readMap,
                   readA, readx, readb, readxexact);
  }
  catch (...)
  {
    std::cout << "Caught exception, maybe the file name is incorrect" << std::endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    // exit with success so as not to break the test harness
    exit(EXIT_SUCCESS);
  }

  // Create a uniformly distributed map.
  // Note that a linear map is used for simplicity only!
  // Amesos (through Epetra) can support *any* map.
  Epetra_Map* mapPtr = 0;

  if (readMap->GlobalIndicesInt())
    mapPtr = new Epetra_Map((int) readMap->NumGlobalElements(), 0, Comm);
  else if (readMap->GlobalIndicesLongLong())
    mapPtr = new Epetra_Map(readMap->NumGlobalElements(), 0, Comm);
  else
    assert(false);

  Epetra_Map& map = *mapPtr;

  // Create the distributed matrix, based on map.
  Epetra_CrsMatrix A(Copy, map, 0);

  const Epetra_Map& OriginalMap = readA->RowMatrixRowMap();
  assert (OriginalMap.SameAs(*readMap));
  Epetra_Export exporter(OriginalMap, map);

  Epetra_Vector x(map);      // distributed solution
  Epetra_Vector b(map);      // distributed rhs
  Epetra_Vector xexact(map); // distributed exact solution

  // Export the data defined on process 0 to the distributed objects.
  x.Export(*readx, exporter, Add);
  b.Export(*readb, exporter, Add);
  xexact.Export(*readxexact, exporter, Add);
  A.Export(*readA, exporter, Add);
  A.FillComplete();

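  // Note: the same redistribution could also be written with an
  // Epetra_Import whose target is the distributed map. This is a sketch
  // only (A2 and importer are illustrative names); the Export-based path
  // above is what this example exercises:
  //
  //   Epetra_Import importer(map, OriginalMap);
  //   Epetra_CrsMatrix A2(Copy, map, 0);
  //   A2.Import(*readA, importer, Insert); // rows are uniquely owned on process 0
  //   A2.FillComplete();
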
  // free the process-0 copies of the data
  delete readMap;
  delete readA;
  delete readx;
  delete readb;
  delete readxexact;
  delete mapPtr;

  // Create an Epetra linear problem, containing matrix
  // A, solution x and rhs b.
  Epetra_LinearProblem Problem(&A, &x, &b);

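  // The problem could also be assembled incrementally; this is a sketch
  // only (Problem2 is an illustrative name), equivalent to the
  // constructor call above:
  //
  //   Epetra_LinearProblem Problem2;
  //   Problem2.SetOperator(&A);
  //   Problem2.SetLHS(&x);
  //   Problem2.SetRHS(&b);
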
  // ======================================================= //
  // B E G I N N I N G O F T H E A M E S O S P A R T //
  // ======================================================= //

  if (!Comm.MyPID())
    std::cout << "Calling Amesos..." << std::endl;

  // For comments on the commands in this section, please
  // see file example_AmesosFactory.cpp.

  Amesos_BaseSolver* Solver = 0;
  Amesos Factory;

  Solver = Factory.Create(solver_type, Problem);

  // Factory.Create() returns 0 if the requested solver
  // is not available.
  if (Solver == 0) {
    std::cerr << "Selected solver (" << solver_type << ") is not available" << std::endl;
    // Return success so as not to break the test harness, even though
    // the solver is not available.
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return(EXIT_SUCCESS);
  }

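  // Solver options can be passed through a Teuchos::ParameterList before
  // factorization, and Amesos::Query() reports whether a solver name is
  // enabled in the current build. Sketch only; the parameter names shown
  // here are illustrative and solver-dependent:
  //
  //   if (!Factory.Query(solver_type))
  //     std::cerr << solver_type << " is not enabled in this build" << std::endl;
  //
  //   Teuchos::ParameterList List;
  //   List.set("PrintTiming", true);
  //   List.set("PrintStatus", true);
  //   Solver->SetParameters(List);
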
  // Call Solve() to compute the solution; this also performs the
  // symbolic and numeric factorizations if they have not been done yet.
  Solver->Solve();

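  // The three phases can also be invoked explicitly, which is useful when
  // the same sparsity pattern (or the same matrix values) is reused across
  // several solves. Sketch only:
  //
  //   Solver->SymbolicFactorization(); // depends on the sparsity pattern only
  //   Solver->NumericFactorization();  // depends on the matrix values
  //   Solver->Solve();                 // solves for the current RHS
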
  // Print status and timing information for the solver.
  Solver->PrintStatus();
  Solver->PrintTiming();

  // Nothing is done with the timing information here, so the
  // following is commented out.

  //Teuchos::ParameterList TimingsList;
  //Solver->GetTiming( TimingsList );

  // You can find out how much time was spent in ...
  //double sfact_time, nfact_time, solve_time;
  //double mtx_conv_time, mtx_redist_time, vec_redist_time;

  // 1) The symbolic factorization
  //    (parameter doesn't always exist)
  //sfact_time = TimingsList.get( "Total symbolic factorization time", 0.0 );

  // 2) The numeric factorization
  //    (always exists if NumericFactorization() is called)
  //nfact_time = Teuchos::getParameter<double>( TimingsList, "Total numeric factorization time" );

  // 3) Solving the linear system
  //    (always exists if Solve() is called)
  //solve_time = Teuchos::getParameter<double>( TimingsList, "Total solve time" );

  // 4) Converting the matrix to the format accepted by the solver
  //    (always exists if SymbolicFactorization() is called)
  //mtx_conv_time = Teuchos::getParameter<double>( TimingsList, "Total matrix conversion time" );

  // 5) Redistributing the matrix for each solve to the format accepted by the solver
  //mtx_redist_time = TimingsList.get( "Total matrix redistribution time", 0.0 );

  // 6) Redistributing the vector for each solve to the format accepted by the solver
  //vec_redist_time = TimingsList.get( "Total vector redistribution time", 0.0 );

  // =========================================== //
  // E N D O F T H E A M E S O S P A R T //
  // =========================================== //

  // Computes ||Ax - b||

  double residual;

  Epetra_Vector Ax(b.Map());
  A.Multiply(false, x, Ax);
  Ax.Update(1.0, b, -1.0);
  Ax.Norm2(&residual);

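  // A scale-independent check would use the relative residual instead.
  // Sketch only (bnorm and relative_residual are illustrative names):
  //
  //   double bnorm;
  //   b.Norm2(&bnorm);
  //   double relative_residual = residual / bnorm;
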
  if (!Comm.MyPID())
    std::cout << "After Amesos solution, ||b - A * x||_2 = " << residual << std::endl;

  // Delete the solver before calling MPI_Finalize(), because its
  // destructor may make MPI calls.
  delete Solver;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (residual > 1e-5)
    return(EXIT_FAILURE);

  return(EXIT_SUCCESS);

} // end of main()
Referenced entities:

Amesos_BaseSolver: A pure virtual class for direct solution of real-valued double-precision operators...
  virtual void PrintTiming() const = 0: Prints timing information about the current solver.
  virtual int Solve() = 0: Solves A X = B (or A^T X = B).
  virtual void PrintStatus() const = 0: Prints status information about the current solver.

Amesos: Factory for binding a third party direct solver to an Epetra_LinearProblem. (Definition: Amesos.h:44)
  Amesos_BaseSolver * Create(const char *ClassType, const Epetra_LinearProblem &LinearProblem): Amesos Create method. (Definition: Amesos.cpp:69)

int main(int argc, char *argv[])