I tried to modify the boundary conditions in natural-convection-fieldsplit-2d-PETSc.edp to use a periodic boundary pair [2,4],
but it reports an error at the line set(dA, fieldsplit = 0...
How can I fix it?
Best regards.
You need to build the domain decomposition using the Periodicity macro; see, for example, FreeFem-sources/examples/hpddm/diffusion-periodic-2d-PETSc.edp (at commit b1e524c8ca6a9b44cb9eff780459bacfc12a3ad7 in the FreeFem/FreeFem-sources repository on GitHub).
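The important point is that the periodicity labels are made known to the DDM macros before the distributed mesh and the Mat are built. A minimal scalar sketch of that pattern, written here with the newer DmeshCreate/MatCreate API (the labels [2,4] and the P2 space are only placeholders; see the linked example for the actual script):

load "PETSc"
macro dimension()2// EOM
include "macro_ddm.idp"
mesh Th = square(30, 30);
int[int] labPeriodic = [2, 4];
macro Pk() P2, periodic=[[labPeriodic[0], y], [labPeriodic[1], y]]// EOM
macro def(i)i// EOM
macro init(i)i// EOM
Mat A;
macro ThPeriodicity()labPeriodic// tell the DDM macros about the periodic labels
DmeshCreate(Th);
MatCreate(Th, A, Pk);
fespace Vh(Th, Pk);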
Yes, I know that macro; it is added in the code:
int[int] labPeriodic = [2,4];
macro Pk()[P2, P2, P1, P1], periodic=[[labPeriodic[0],y], [labPeriodic[1],y]]//
and the boundary conditions are changed in the varf.
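Concretely, the Dirichlet terms in the varf now only involve labels 1 and 3 (2 and 4 being handled by the periodicity):
+ on(1, 3, uw1 = 0, uw2 = 0) + on(1, Tw = 1) + on(3, Tw = 0);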
This is the script:
natural-convection-periodic-fieldsplit-2d-PETSc.edp (5.3 KB)
I don't want to do mesh adaptation, so I set adaptation to 0, and I want the preconditioner to be set up at every step, so I replaced if(adaptation || nu == nuCurrent) with if(1).
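That is, the guard around the solver setup in the example now always fires, along the lines of:

if(1) { // was: if(adaptation || nu == nuCurrent)
    // preconditioner setup, i.e. the two set(dA, ...) calls shown in the script below
}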
On my Windows laptop, the script can only run the first SNES step with mpiexec -n 1, and it throws an error immediately with -n 2.
current line = 434 mpirank 0 / 2
current line = 434 mpirank 1 / 2
Exec error : Periodic: the both number of edges is not the same
-- number :1
[0]PETSC ERROR: ------------------------------------------------------------------------
[0]PETSC ERROR: Caught signal number 11 SEGV: Segmentation Violation, probably memory access out of range
[0]PETSC ERROR: Try option -start_in_debugger or -on_error_attach_debugger
[0]PETSC ERROR: or see https://petsc.org/release/faq/#valgrind and https://petsc.org/release/faq/
[0]PETSC ERROR: configure using --with-debugging=yes, recompile, link, and run
[0]PETSC ERROR: to get more information on the crash.
[0]PETSC ERROR: Run with -malloc_debug to check if memory corruption is causing the crash.
job aborted:
[ranks] message
[0] application aborted
aborting MPI_COMM_WORLD (comm=0x44000000), error 59, comm rank 0
[1] terminated
---- error analysis -----
[0] on LAPTOP-HMTO18GH
FreeFem++-mpi.exe aborted the job. abort code 59
Please help me resolve this issue.
No, it is not added as it should be.
Sorry, I can't debug it myself.
I have now added the macro at the same location as in diffusion-periodic-2d-PETSc.edp and removed the old API, but the error still occurs.
// run with MPI: ff-mpirun -np 4 script.edp
// NBPROC 4
// sequential script from https://www.um.es/freefem/ff++/pmwiki.php?n=Main.Newton3D
// Navier--Stokes block solved using a modified augmented Lagrangian preconditioner,
// cf. [Moulin et al. 2019] (https://github.com/prj-/moulin2019al)
load "PETSc" // PETSc plugin
macro dimension()2// EOM // 2D or 3D
macro trueRestrict()true// EOM
macro removeZeros()true// EOM
include "macro_ddm.idp" // additional DDM functions
mesh Th = square(getARGV("-global", 30), getARGV("-global", 30));
real gammaAL = 0.3;
macro grad(u)[dx(u), dy(u)]//
macro Grad(u)[grad(u#1), grad(u#2)]//
macro div(u)(dx(u#1) + dy(u#2))//
macro ugrad(u, v)([u#1, u#2]' * grad(v))//
macro UgradV(u, v, T)[ugrad(u, v#1), ugrad(u, v#2), ugrad(u, T)]//
int[int] labPeriodic = [2,4];
macro Pk() [P2, P2, P1, P1], periodic=[[labPeriodic[0],y], [labPeriodic[1],y]]//
macro Pi() P1, periodic=[[labPeriodic[0],y], [labPeriodic[1],y]]//
macro def(i)[i, i#B, i#C, i#D]// EOM // vector field definition
macro init(i)[i, i, i, i]// EOM // vector field initialization
real nu = 1/100.0;
real nuFinal = 1/15000.0;
real Pr = 56.2;
real k = 1/Pr;
real nuCurrent = nu;
mesh ThBackup=Th;
Mat dA;
int[int] n2o;
macro ThPeriodicity()labPeriodic//
macro ThN2O()n2o//
DmeshCreate(Th);
MatCreate(Th, dA, Pk);
fespace Wh(Th, Pk);
Wh [u1, u2, p, T] = [0, 0, 0, 1-y];
real c;
varf vPb([uw1, uw2, pw, Tw], [v1, v2, q, TT])
= int2d(Th)(gammaAL * div(uw) * div(v) + UgradV(u, uw, Tw)' * [v1, v2, TT] + UgradV(uw, u, T)' * [v1, v2, TT]
+ (Grad(uw) : Grad(v)) * nuCurrent
+ c * Tw * v2 + grad(Tw)' * grad(TT) * k - div(uw) * q - div(v) * pw)
+ int2d(Th)(gammaAL * div(u) * div(v) + UgradV(u, u, T)' * [v1, v2, TT]
+ (Grad(u) : Grad(v)) * nuCurrent
+ c * T * v2 + grad(T)' * grad(TT) * k - div(u) * q - div(v) * p)
+ on(1, 3, uw1 = 0, uw2 = 0) + on(1, Tw = 1)+ on( 3, Tw = 0);
varf vS(p, q) = int2d(Th, qforder = 3)(-1/(gammaAL + nuCurrent) * p * q);
//Mat dA(Wh.ndof, intersection, D);
func real[int] funcRes(real[int]& inPETSc) {
    ChangeNumbering(dA, u1[], inPETSc, inverse = true, exchange = true);
    real[int] out(Wh.ndof);
    out = vPb(0, Wh, tgv = -2);
    real[int] outPETSc;
    ChangeNumbering(dA, out, outPETSc);
    return outPETSc;
}
func int funcJ(real[int]& inPETSc) {
    ChangeNumbering(dA, u1[], inPETSc, inverse = true, exchange = true);
    dA = vPb(Wh, Wh, tgv = -2);
    return 0;
}
while(nuCurrent > nuFinal) {
    Wh [list1, list2, listP, listT] = [1.0, 1.0, 1.0, 2.0];
    set(dA, sparams = "-ksp_monitor -ksp_max_it 100 -ksp_type fgmres -ksp_rtol 1.0e-2 -pc_type fieldsplit -fieldsplit_1_ksp_type gmres -fieldsplit_1_pc_type hypre", fields = list1[]);
    [list1, list2, listP, listT] = [1.0, 1.0, 2.0, 0.0];
    fespace Qh(Th, Pi);
    Qh listS;
    listS[] = 1:Qh.ndof;
    Wh [uw1, uw2, pw, Tw] = [0.0, 0.0, listS, 0.0];
    matrix[int] S(1);
    S[0] = vS(Qh, Qh);
    string[int] names(2);
    names[0] = "v";
    names[1] = "p";
    string paramsNS = "-prefix_push fieldsplit_0_ -prefix_push fieldsplit_v_ -pc_type lu -prefix_pop -prefix_push fieldsplit_p_ -ksp_type cg -ksp_max_it 5 -pc_type jacobi -prefix_pop -ksp_type fgmres " + " -ksp_rtol 1.0e-2 -ksp_gmres_restart 200 -prefix_pop";
    set(dA, fieldsplit = 0, sparams = "-fieldsplit_0_ksp_type fgmres -fieldsplit_0_ksp_max_it 100 -fieldsplit_0_ksp_rtol 1.0e-1 -fieldsplit_0_pc_type fieldsplit " + paramsNS, fields = list1[], schurPreconditioner = S, schurList = uw1[], names = names);
    c = -1/(nuCurrent * Pr);
    real[int] xPETSc;
    ChangeNumbering(dA, u1[], xPETSc);
    SNESSolve(dA, funcJ, funcRes, xPETSc, sparams = "-snes_monitor -snes_type newtonls -snes_rtol 1.0e-6");
    ChangeNumbering(dA, u1[], xPETSc, inverse = true, exchange = 1);
    if(!NoGraphicWindow) {
        fespace Vh(Th, P1);
        Vh only = T;
        macro def1(i)i//
        plotMPI(Th, only, P1, def1, real, cmm = "Global temperature for viscosity = " + nuCurrent);
        only = p;
        plotMPI(Th, only, P1, def1, real, cmm = "Global pressure for viscosity = " + nuCurrent);
        fespace Zh(Th, [P2, P2]);
        Zh [onlyU, onlyV] = [u1, u2];
        macro def2(i)[i, i#B]//
        plotMPI(Th, [onlyU, onlyV], [P2, P2], def2, real, cmm = "Global velocity for viscosity = " + nuCurrent);
    }
    nuCurrent = max(nuFinal, nuCurrent/2.0);
}
Could you please help me debug the code?
Do not put the solver stuff inside the while loop; then the code will run, though I think it is wrong.
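A minimal sketch of that restructuring, reusing only the pieces already defined in the script above (plots omitted, option strings unchanged):

Wh [list1, list2, listP, listT] = [1.0, 1.0, 1.0, 2.0];
set(dA, sparams = "-ksp_monitor -ksp_max_it 100 -ksp_type fgmres -ksp_rtol 1.0e-2 -pc_type fieldsplit -fieldsplit_1_ksp_type gmres -fieldsplit_1_pc_type hypre", fields = list1[]);
[list1, list2, listP, listT] = [1.0, 1.0, 2.0, 0.0];
fespace Qh(Th, Pi);
Qh listS;
listS[] = 1:Qh.ndof;
Wh [uw1, uw2, pw, Tw] = [0.0, 0.0, listS, 0.0];
matrix[int] S(1);
S[0] = vS(Qh, Qh);
string[int] names(2);
names[0] = "v";
names[1] = "p";
string paramsNS = "-prefix_push fieldsplit_0_ -prefix_push fieldsplit_v_ -pc_type lu -prefix_pop -prefix_push fieldsplit_p_ -ksp_type cg -ksp_max_it 5 -pc_type jacobi -prefix_pop -ksp_type fgmres -ksp_rtol 1.0e-2 -ksp_gmres_restart 200 -prefix_pop";
set(dA, fieldsplit = 0, sparams = "-fieldsplit_0_ksp_type fgmres -fieldsplit_0_ksp_max_it 100 -fieldsplit_0_ksp_rtol 1.0e-1 -fieldsplit_0_pc_type fieldsplit " + paramsNS, fields = list1[], schurPreconditioner = S, schurList = uw1[], names = names);
while(nuCurrent > nuFinal) {
    c = -1/(nuCurrent * Pr);
    real[int] xPETSc;
    ChangeNumbering(dA, u1[], xPETSc);
    SNESSolve(dA, funcJ, funcRes, xPETSc, sparams = "-snes_monitor -snes_type newtonls -snes_rtol 1.0e-6");
    ChangeNumbering(dA, u1[], xPETSc, inverse = true, exchange = 1);
    nuCurrent = max(nuFinal, nuCurrent/2.0);
}

Note this only hoists the preconditioner setup and the Schur complement matrix out of the continuation loop; it does not change the formulation itself.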
0 SNES Function norm 5.477260662444e+00
0 KSP Residual norm 5.477260662444e+00
1 KSP Residual norm 1.468227003738e-05
0 SNES Function norm 5.477365923274e+00
0 KSP Residual norm 5.477365923274e+00
1 KSP Residual norm 2.984325953078e-05
1 SNES Function norm 5.477365923274e+00
[...]