I am trying to parallelize a Picard iteration for a lid-driven cavity flow (with a coupled temperature equation) using PETSc. The program runs correctly on a single core, but it produces errors as soon as I run it on multiple cores. I suspect the boundary-condition setup is the problem, but I have not found a tutorial that addresses this.
Thanks.
This is my code:
load "PETSc"
macro dimension()2// EOM // 2D or 3D
include "macro_ddm.idp"
real ccc=1.5;//2.;
macro fff(x) ((1+tanh(ccc*(x*2-1))/tanh(ccc))/2)// mesh-grading map: clusters points near y = 0 and y = 1
mesh Th=square(50,20,[x*2.0*pi/1.99,fff(y)]); // global mesh, graded in y
buildDmesh(Th); // distribute the mesh: Th becomes the local (overlapping) mesh of each process
//int[int] labPeriodic = [2, 4];
// macro Pk() [P2, P2, P1, P2], periodic=[[labPeriodic[0],y], [labPeriodic[1],y]]// EOM
macro trueRestrict()true// EOM
macro Pk() [P2, P2, P1, P2]//EOM
//macro ThPeriodicity()labPeriodic//
macro def(i)[i, i#B, i#C, i#D]//EOM
macro init(i)[i, i, i, i]//EOM
// real ct;
fespace Wh(Th, Pk);
Mat A;
int[int] n2o;
macro ThN2O()n2o//
MatCreate(Th, A, Pk); // distributed PETSc matrix attached to the local space Wh
Wh [u1,u2,p,T],[up1,up2,pp,Tp],[v1,v2,q,TT]; // current iterate, lagged (Picard) iterate, test functions
[u1,u2,p,T] = [0,0,0,1-y]; // initial guess: fluid at rest, linear temperature profile
fespace Zh(Th, P2);
Zh ou =T;
macro def1(i)i//
plotMPI(Th, ou, P2, def1, real, cmm = "T"); // visualize the initial temperature field
macro grad(u) [dx(u),dy(u)]//
macro div(u1,u2) (dx(u1)+dy(u2))//
macro Vgradu(v1,v2,u) (grad(u)'*[v1,v2])//
macro Grad(u1,u2) [grad(u1),grad(u2)]//
macro VgradU(v1,v2,u1,u2) [Vgradu(v1,v2,u1),Vgradu(v1,v2,u2)]//
real Pr = 100; // Prandtl number
real eps = 1e-8; // pressure penalization parameter
//real cNewton = 0;
real Ma = 1e4; // only used in the commented-out boundary terms below
real err; // norm of the Picard increment (see the stopping-test sketch inside the loop)
// Picard-linearized system: the convection terms use the lagged velocity field (up1,up2)
varf aa([u1,u2,p,T],[v1,v2,q,TT]) = int2d(Th)(VgradU(up1,up2,u1,u2)'*[v1,v2]/Pr
+ grad(T)'*grad(TT)
+ Vgradu(up1,up2,T)*TT
+ (Grad(u1,u2):Grad(v1,v2))
- p * div(v1,v2) - q *div(u1,u2) - eps*p*q ) // eps*p*q removes the constant-pressure mode
// + int1d(Th,3)(Ma*dx(T)*v1)
// + int1d(Th,3)(Ma*1*v1)
// + int1d(Th,3)((1)*TT)
// + on(1,u1=0)
// + on(1,3,u2=0)
// + on(1,T=1);
+on(1,2,3,4,u2=0) // u2 = 0 on every boundary
+on(1,2,4,u1=0) // no-slip on the bottom (1), right (2) and left (4) walls
+on(3,u1=1) // moving lid on the top boundary (label 3)
;
varf bb([u1,u2,p,T],[v1,v2,q,TT]) = int2d(Th)(0*v1) // right-hand side: zero body force, same Dirichlet data as in aa
// - int1d(Th,3)(Ma*dx(T)*v1)
// - int1d(Th,3)(Ma*1*v1)
// - int1d(Th,3)((1)*TT)
// + on(1,u1=0)
// + on(1,3,u2=0);
//+on(1,T=1)
+on(1,2,3,4,u2=0)
+on(1,2,4,u1=0)
+on(3,u1=1)
;
// varf aa([u1,u2,p],[v1,v2,q]) = int2d(Th)(dx(u1)*dx(v1)+dy(u1)*dy(v1) +dx(u2)*dx(v2)+dy(u2)*dy(v2) - p*(dx(v1)+dy(v2))) +on(1,2,3,4,u2 = 0) +on(1,2,4,u1 = 0) + on(3,u1=1);
// varf bb([u1,u2,p],[v1,v2,q]) = on(1,2,3,4,u2 = 0) +on(1,2,4,u1 = 0) + on(3,u1=1);
for(int step=0; step < 4000; step++)
{
real ct = clock();
up1[]= u1[]; // freeze the current iterate: it becomes the Picard linearization point
A = aa(Wh,Wh); // reassemble the linearized operator into the distributed matrix
real[int] b = bb(0,Wh); // assemble the local right-hand side
u1[]=A^-1*b; // parallel solve with the PETSc KSP attached to A
// real[int] bPETSc, xPETSc;
// ChangeNumbering(A,b,bPETSc); //Change Numbering from Normal vector to PETSc vector (bPETSc). To be used by the Krylov Solver.
// xPETSc.resize(bPETSc.n); //Resize the solution vector.
// KSPSolve(A,bPETSc,xPETSc); //Using the KSP Solve Routine from PETSc. The solution is stored in xPETSc;
// ChangeNumbering(A,u1[],xPETSc,inverse=true,exchange=true); //Change from PETSc vector to normal vector.
ct = clock() - ct;
cout<<ct<<endl;
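// --- stopping test: a sketch only, my assumption of how to measure the Picard increment in parallel ---
// The local arrays overlap between processes, so a plain .l2 norm would double-count shared
// degrees of freedom; ChangeNumbering maps to the non-overlapping PETSc numbering first.
real[int] diff(u1[].n);
diff = u1[] - up1[];
real[int] diffPETSc;
ChangeNumbering(A, diff, diffPETSc);
real errLoc = diffPETSc'*diffPETSc;
real errGlb;
mpiAllReduce(errLoc, errGlb, mpiCommWorld, mpiSUM);
err = sqrt(errGlb); // global l2 norm of the increment (all components)
if(mpirank == 0) cout << "step " << step << ", increment = " << err << endl;
if(err < 1e-8) break;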
fespace nh(Th, [P2, P2]); // visualization space, rebuilt at every iteration
nh [ou1, ou2] = [u1, u2]; // interpolate the velocity components for plotting
macro def2(i)[i, i#B]//
plotMPI(Th, [ou1,ou2], [P2, P2], def2, real, cmm = "u");
// // plotMPI(Th, ou, P2, def1, real, cmm = "T");
}
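One thing I am not sure about is whether the default KSP behind u1[]=A^-1*b is suitable for this coupled velocity/pressure/temperature system once it is distributed. As a sketch of what I would try (assuming the PETSc build includes MUMPS; the options below are standard PETSc flags), I could force a parallel direct solve by setting the solver once after assembling A:

set(A, sparams = "-ksp_type preonly -pc_type lu -pc_factor_mat_solver_type mumps");

Would something like this be necessary in parallel, or is the error really coming from the boundary conditions?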