#include "ml_config.h"
#include "ml_common.h"
#ifdef HAVE_ML_MLAPI
#include "Teuchos_RCP.hpp"
#include "Teuchos_CommandLineProcessor.hpp"
#include "EpetraExt_CrsMatrixIn.h"
#include "EpetraExt_VectorIn.h"
#include "Epetra_Vector.h"
using namespace Teuchos;
int main(int argc, char *argv[])
{
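  // Initialize MPI (when enabled) before any Epetra or MLAPI objects are created.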
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
#endif
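  // Command-line options: optional Matrix Market files for the matrix and the
  // right-hand side, the number of additional adaptive candidate vectors, and
  // the number of PDE equations per grid node.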
  Teuchos::CommandLineProcessor clp;
  std::string matrixFile;       clp.setOption("matrix", &matrixFile, "Matrix to solve");
  std::string rhsFile;          clp.setOption("rhs", &rhsFile, "RHS to solve");
  int AdditionalCandidates = 1; clp.setOption("numvecs", &AdditionalCandidates, "Number of additional adaptive candidate vectors");
  int NumPDEs = 1;              clp.setOption("numpdes", &NumPDEs, "Number of PDE equations");
  clp.recogniseAllOptions(true);

  switch (clp.parse(argc, argv)) {
    case Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED:        return EXIT_SUCCESS;
    case Teuchos::CommandLineProcessor::PARSE_ERROR:
    case Teuchos::CommandLineProcessor::PARSE_UNRECOGNIZED_OPTION: return EXIT_FAILURE;
    case Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL:          break;
  }
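  // All MLAPI objects live inside this try block so that errors are reported
  // and MPI is still finalized cleanly below.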
  try {
    // Initialize the MLAPI workspace (wraps the Epetra/MPI communicator).
    Init();

    RCP<Operator> A;
    RCP<Epetra_Vector> rhsE;

    if (!matrixFile.empty()) {
      // Read the matrix (and, if given, the RHS) from Matrix Market files.
      Epetra_CrsMatrix* temp = 0;
      EpetraExt::MatrixMarketFileToCrsMatrix(matrixFile.c_str(), GetEpetra_Comm(), temp);
      // Wrap the Epetra matrix as an MLAPI Operator. The Space objects below
      // assume linearly distributed domain/range maps.
      Space DomainSpace(temp->DomainMap().NumGlobalElements(), temp->DomainMap().NumMyElements());
      Space RangeSpace(temp->RangeMap().NumGlobalElements(), temp->RangeMap().NumMyElements());
      A = rcp(new Operator(DomainSpace, RangeSpace, temp));
      if (!rhsFile.empty()) {
        Epetra_Vector* tempV = 0;
        EpetraExt::MatrixMarketFileToVector(rhsFile.c_str(), temp->RowMap(), tempV);
        rhsE = rcp(tempV);
      }
    }
    else {
      // No matrix file given: build a 2-equation, 1D Laplace-like test
      // problem with NX block rows through MLAPI's DistributedMatrix.
      int NX = 1000;
      Space FineSpace(2 * NX);
      DistributedMatrix* Atemp = new DistributedMatrix(FineSpace, FineSpace);
      // Insert the global entries on process 0 only; FillComplete()
      // redistributes them according to FineSpace.
      if (GetMyPID() == 0)
      {
        for (int i = 0 ; i < NX ; ++i)
        {
          (*Atemp)(2*i, 2*i) = 2.0;
          (*Atemp)(2*i+1, 2*i+1) = 2.0;
          if (i)
          {
            (*Atemp)(2*i, 2*(i - 1)) = - 1.0;
            (*Atemp)(2*i+1, 2*(i - 1)+1) = - 1.0;
          }
          if (i != NX - 1)
          {
            (*Atemp)(2*i, 2*(i + 1)) = - 1.0;
            (*Atemp)(2*i+1, 2*(i + 1)+1) = - 1.0;
          }
        }
      }
      Atemp->FillComplete();
      A = rcp(Atemp);
    }
    // Parameters for the adaptive SA hierarchy and the Krylov solver.
    int MaxLevels = 10;
    Teuchos::ParameterList List;
    List.set("additional candidates", AdditionalCandidates);
    List.set("PDE equations", NumPDEs);
    List.set("use default null space", true);
    List.set("krylov: type", "cg");

    // Build the adaptive SA preconditioner and compute the hierarchy; the
    // constructor is assumed to take (fine matrix, list, PDE equations, max levels).
    RCP<MultiLevelAdaptiveSA> Prec =
      rcp(new MultiLevelAdaptiveSA(*A, List, NumPDEs, MaxLevels));
    bool UseDefaultNullSpace = true;
    Prec->AdaptCompute(UseDefaultNullSpace, AdditionalCandidates);

    // Solution and right-hand side vectors in the operator's spaces.
    MultiVector LHS(A->GetDomainSpace());
    MultiVector RHS(A->GetRangeSpace());
    if (!rhsE.is_null()) {
      // Copy the Epetra RHS into the MLAPI vector; start from a zero guess.
      for (int i = 0 ; i < rhsE->MyLength() ; ++i)
        RHS(i, 0) = (*rhsE)[i];
      LHS = 0.0;
    }
    else {
      // No RHS given: solve A x = 0 starting from a random initial guess.
      LHS.Random();
      RHS = 0.0;
    }

    // Solve with the MLAPI Krylov driver (CG, as set above), preconditioned
    // by the adaptive SA hierarchy, then release the MLAPI workspace.
    Krylov(*A, LHS, RHS, *Prec, List);
    Finalize();
  }
  catch (const int e) {
    // MLAPI reports failures by throwing integer error codes.
    std::cerr << "Caught integer exception, code = " << e << std::endl;
  }
  catch (...) {
    std::cerr << "Caught exception..." << std::endl;
  }
#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(0);
}
#else
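// MLAPI was not enabled at configure time: build a stub that only reports
// the configure options this example needs.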
#include "ml_include.h"
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
#endif

  puts("This MLAPI example requires the following configuration options:");
  puts("\t--enable-epetra");
  puts("\t--enable-teuchos");
  puts("\t--enable-ifpack");
  puts("\t--enable-amesos");
  puts("Please check your configure line.");

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(0);
}
#endif
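// Example invocation (hypothetical executable and file names; the actual
// target name depends on how the example is built):
//   mpirun -np 4 ./MLAPI_AdaptiveSA.exe --matrix=A.mm --rhs=b.mm --numvecs=2 --numpdes=2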