Parallelization of a heat transfer problem with Lagrange multipliers

Dear All,

Can someone please help me write a parallel version of my heat transfer problem with Lagrange multipliers? The sequential script below works fine, but because I need to handle large FE models, I also need to be able to solve it in parallel.

load "msh3";
// =========================================================
// Import mesh
mesh3 mCube=readmesh3("cube.mesh");
cout << "============================================================" << endl;
cout << "Mesh information:"                                            << endl;
cout << "Mesh volume                 = " << mCube.measure              << endl;
cout << "Mesh surface area           = " << mCube.bordermeasure        << endl;
cout << "Number of tetrahedron       = " << mCube.nt                   << endl;
cout << "Number of nodes             = " << mCube.nv                   << endl;
cout << "Number of boundary elements = " << mCube.nbe                  << endl;
cout << "============================================================" << endl;
// =========================================================
// Finite element space
fespace fCube(mCube, P1);
cout << "============================================================" << endl;
cout << "Finite element model information:"                            << endl;
cout << "Number of elements         = " << fCube.nt                    << endl;
cout << "Number of DOFs             = " << fCube.ndof                  << endl;
cout << "Number of DOFs per element = " << fCube.ndofK                 << endl;
cout << "============================================================" << endl;
// =========================================================
macro grad(T) [dx(T), dy(T), dz(T)] // End of Macro
// =========================================================
real Lambda = 235.00; // thermal conductivity [W/(m*K)]
real Alpha  =   3.35; // heat transfer coefficient [W/(m^2*K)]
real Te =  215.0 + 273.15; // ambient temperature for the convective surface [K]
real Tg = 3775.0 + 273.15; // temperature imposed through the Lagrange multipliers [K]
// =========================================================
// Steady state heat transfer
real t0=clock();
// Lagrange multipliers: one constraint per node with z < 1.0,
// imposing T = Tg there (for P1 elements, DOF i coincides with vertex i)
int nBC = 0;
for (int i=0; i<fCube.ndof; i++)
{if (mCube(i).z < 1.0){nBC+=1;}}
cout << "Number of BCs = " << nBC << endl;
real[int, int] B(nBC, fCube.ndof); B=0.0; // constraint matrix (one row per constrained node)
real[int     ] V(nBC            ); V=0.0; // constraint right-hand side
int Ctr=0;
for (int i=0; i<fCube.ndof; i++)
{
    if (Ctr < nBC && mCube(i).z < 1.0)
    {B(Ctr, i)=1.0; V(Ctr)=Tg; Ctr+=1;}
}
real t1=clock();
// Global matrices
varf vfHeatK(uT, vT)
    =int3d(mCube)(Lambda*grad(uT)'*grad(vT))      // conduction in the volume
    +int2d(mCube, 2, qft=qf1pTlump)(Alpha*uT*vT); // convection on boundary label 2
varf vfHeatR(uT, vT)
    =int2d(mCube, 2, qft=qf1pTlump)(Alpha*Te*vT); // convective load
matrix    K=vfHeatK(fCube, fCube);
real[int] R=vfHeatR(0    , fCube);
// Saddle-point system: [K B'; B 0] [T; lambda] = [R; V]
matrix KK=[[K, B'],
           [B, 0 ]];
real[int] RR=[R, V];
real t2=clock();
set(KK, solver=sparsesolver); // direct sparse solver (KK is an indefinite saddle-point matrix)
real[int] Temp=KK^-1*RR;
real t3=clock();
cout << "Global system, number of rows    = " << KK.n << endl;
cout << "Global system, number of columns = " << KK.m << endl;
cout << "============================================================" << endl;
cout << "CPU time: Lagrange multipliers = " << (t1-t0) << endl;
cout << "CPU time: Global matrices      = " << (t2-t1) << endl;
cout << "CPU time: Solver               = " << (t3-t2) << endl;
cout << "============================================================" << endl;
// Write the nodal results to a file (the first fCube.ndof entries of Temp
// are the temperatures; the remaining nBC entries are the multipliers)
ofstream resultfile("cube.rdat");
resultfile.precision(6);
resultfile.fixed;
for (int i=0; i<fCube.ndof; i++)
{
    resultfile << setw( 6) << i+1        << ";"
               << setw(12) << mCube(i).x << ";"
               << setw(12) << mCube(i).y << ";"
               << setw(12) << mCube(i).z << ";"
               << setw(12) << Temp[i]    << endl;
}

Best Regards
Oliver

I think this example demonstrates the way to use Lagrange multipliers with PETSc.
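For readers who do not have that example at hand, here is a minimal sketch (not the cited example) of one way to parallelize the script above with FreeFEM's PETSc interface. Everything specific in it is an assumption: cube.mesh and the convection label 2 are carried over from the sequential script, the constrained nodes are assumed to lie on a boundary with label 1 (adjust to your mesh), and the CG/GAMG solver options are just one plausible choice. Note the deliberate swap of technique: because every row of B pins a single node to Tg, the constraints amount to an ordinary Dirichlet condition, so the sketch eliminates them with on(..., tgv=-1) instead of keeping the saddle-point block; a genuine multiplier block would be assembled as a PETSc block matrix, as in the cited example.

load "msh3";
load "PETSc";                              // FreeFEM's PETSc interface
macro dimension()3// EOM
include "macro_ddm.idp"

macro grad(T) [dx(T), dy(T), dz(T)] // End of Macro

mesh3 mCube=readmesh3("cube.mesh");
Mat A;
createMat(mCube, A, P1);                   // partition the mesh and build the parallel layout
fespace fCube(mCube, P1);

real Lambda = 235.00;                      // thermal conductivity [W/(m*K)]
real Alpha  =   3.35;                      // heat transfer coefficient [W/(m^2*K)]
real Te =  215.0 + 273.15;                 // ambient temperature [K]
real Tg = 3775.0 + 273.15;                 // prescribed temperature [K]

varf vfHeatK(uT, vT)
    =int3d(mCube)(Lambda*grad(uT)'*grad(vT))
    +int2d(mCube, 2, qft=qf1pTlump)(Alpha*uT*vT)
    +on(1, uT=Tg);                         // assumption: the z < 1 nodes carry label 1
varf vfHeatR(uT, vT)
    =int2d(mCube, 2, qft=qf1pTlump)(Alpha*Te*vT)
    +on(1, uT=Tg);

A = vfHeatK(fCube, fCube, tgv=-1);         // tgv=-1: symmetric elimination of the fixed DOFs
real[int] R = vfHeatR(0, fCube, tgv=-1);

set(A, sparams="-ksp_type cg -pc_type gamg -ksp_monitor");
fCube Temp;
Temp[] = A^-1*R;                           // distributed solve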

This one is the exact equivalent of your sequential script.
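Whichever route you take, the parallel script is launched with FreeFEM's MPI wrapper rather than with FreeFem++, for example on four processes (the file name cube-petsc.edp is hypothetical):

ff-mpirun -np 4 cube-petsc.edp -v 0

Also keep in mind that after createMat each rank only owns its local subdomain, so the result-file loop from the sequential script would need to either write one file per rank or gather the solution onto a single process before producing cube.rdat.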