Adaptive mesh refinement with MPI (UPDATED)

1 year 4 months ago - 1 year 4 months ago #4556 by sigmunh
I'm trying to convert the example for adaptive mesh refinement [1] to work with MPI, but I'm getting a segmentation fault when I call the function

    mesh.Refine()

What is the correct way to do this? Do I need to 'collect' the mesh from the different cores and run mesh.Refine() only on the rank=0 core (mirroring how the mesh is distributed to the workers at the beginning)?

1: When run on two or more cores (mpirun -np 2 python3 adaptive.py), it crashes with a segfault when the code reaches the line mesh.Refine(). A stripped-down sketch of what I am attempting is below, followed by the full script.

[1] docu.ngsolve.org/v6.2.1808/whetting_the_appetite/adaptive.html
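
To make the question concrete, here is a stripped-down sketch of the call pattern I am using. It is not necessarily the intended way to refine a distributed mesh (that is what I am asking); the unit square stands in for the full geometry of the complete script below, and every element is simply flagged for refinement.

Code:
# Stripped-down sketch of what I am attempting -- I am not sure this is the
# correct call pattern for refining a distributed mesh. The unit square
# replaces the full geometry of the complete script, just to keep it short.
import ngsolve as ng
import netgen.meshing
from netgen.geom2d import unit_square
from mpi4py import MPI

comm = MPI.COMM_WORLD

if comm.size > 1:
    if comm.rank == 0:
        # master generates the mesh and distributes it to the workers
        ngmesh = unit_square.GenerateMesh(maxh=0.2)
        mesh = ng.Mesh(ngmesh.Distribute(comm))
    else:
        # workers receive their part of the distributed mesh
        mesh = ng.Mesh(netgen.meshing.Mesh.Receive(comm))
else:
    mesh = ng.Mesh(unit_square.GenerateMesh(maxh=0.2))

# every rank flags its local elements and calls Refine() collectively;
# with two or more ranks this is where the segfault happens for me
for el in mesh.Elements():
    mesh.SetRefinementFlag(el, True)
mesh.Refine()

print(comm.rank, "elements after refinement:", len(list(mesh.Elements())))

The alternative I can imagine is gathering the mesh back on rank 0, refining only there, and redistributing, but I do not know which calls that would involve.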

Code:
from ngsolve import *
import ngsolve as ng
import netgen.meshing   # needed for netgen.meshing.Mesh.Receive on the worker ranks
from netgen.geom2d import SplineGeometry
from ngsolve.krylovspace import CGSolver
from mpi4py import MPI

comm = MPI.COMM_WORLD

#   point numbers 0, 1, ... 11
#   sub-domain numbers (1), (2), (3)
#
#
#             7-------------6
#             |             |
#             |     (2)     |
#             |             |
#      3------4-------------5------2
#      |                           |
#      |             11            |
#      |           /   \           |
#      |         10 (3) 9          |
#      |           \   /     (1)   |
#      |             8             |
#      |                           |
#      0---------------------------1
#
def make_geometry():
    geometry = SplineGeometry()

    # point coordinates ...
    pnts = [ (0,0), (1,0), (1,0.6), (0,0.6), \
             (0.2,0.6), (0.8,0.6), (0.8,0.8), (0.2,0.8), \
             (0.5,0.15), (0.65,0.3), (0.5,0.45), (0.35,0.3) ]
    pnums = [geometry.AppendPoint(*p) for p in pnts]

    # start-point, end-point, boundary-condition, domain on left side, domain on right side:
    lines = [ (0,1,1,1,0), (1,2,2,1,0), (2,5,2,1,0), (5,4,2,1,2), (4,3,2,1,0), (3,0,2,1,0), \
              (5,6,2,2,0), (6,7,2,2,0), (7,4,2,2,0), \
              (8,9,2,3,1), (9,10,2,3,1), (10,11,2,3,1), (11,8,2,3,1) ]

    for p1,p2,bc,left,right in lines:
        geometry.Append( ["line", pnums[p1], pnums[p2]], bc=bc, leftdomain=left, rightdomain=right)
    return geometry

# build the mesh on rank 0 and distribute it; worker ranks receive their part
if comm.size > 1:
    if comm.rank == 0:
        mesh = make_geometry().GenerateMesh(maxh=0.2)
        mesh = ng.Mesh(mesh.Distribute(comm))
    else:
        mesh = ng.Mesh(netgen.meshing.Mesh.Receive(comm))
else:
    mesh = ng.Mesh(make_geometry().GenerateMesh(maxh=0.2))

# write the initial per-rank mesh (no fields attached)
vtk = ng.VTKOutput(
    ma=mesh,
    coefs=[],
    names=[],
    filename=f"mesh-rank-{comm.rank}",
    subdivision=0)
vtk.Do()

fes = H1(mesh, order=3, dirichlet=[1])
u = fes.TrialFunction()
v = fes.TestFunction()

# one heat conductivity coefficient per sub-domain
lam = CoefficientFunction([1, 1000, 10])

# a = BilinearForm(fes, symmetric=False)
# a += SymbolicBFI(lam*grad(u)*grad(v))
a = BilinearForm(lam*grad(u)*grad(v) * dx)
pre = ng.Preconditioner(a, "local")
# a.Assemble()

# heat-source in sub-domain 3
f = LinearForm(fes)
f += SymbolicLFI(CoefficientFunction([0, 0, 1])*v)

# c = Preconditioner(a, type="multigrid", inverse = "sparsecholesky")

gfu = GridFunction(fes)
# Draw(gfu)

# finite element space and gridfunction to represent
# the heatflux:
space_flux = HDiv(mesh, order=2)
gf_flux = GridFunction(space_flux, "flux")

def SolveBVP(i):
    fes.Update()
    gfu.Update()
    a.Assemble()
    f.Assemble()
    inv = CGSolver(a.mat, pre.mat, printrates=comm.rank == 0, maxiter=200, tol=1e-8)
    gfu.vec.data = inv * f.vec
    vtk = ng.VTKOutput(
        ma=mesh,
        coefs=[gfu],
        names=["Temperature (K)"],
        filename=f"results-{i}",
        subdivision=0)
    vtk.Do()
    # Redraw(blocking=True)

# (ndof, error-estimate) pairs for the convergence plot
l = []

def CalcError():
    space_flux.Update()
    gf_flux.Update()
    flux = lam * grad(gfu)
    # interpolate finite element flux into H(div) space:
    gf_flux.Set(flux)
    # Gradient-recovery error estimator
    err = 1/lam*(flux-gf_flux)*(flux-gf_flux)
    elerr = Integrate (err, mesh, VOL, element_wise=True)
    if len(elerr) == 0:
        maxerr = 0
    else:
        maxerr = max(elerr)

    maxerr = comm.allreduce(maxerr, MPI.MAX)
    comm.Barrier()
    l.append( (fes.ndof, sqrt(sum(elerr))) )
    i = 0
    i_nr = 0
    for el in mesh.Elements():
        i_nr += 1
        if elerr[el.nr] > 0.25*maxerr:
            i += 1
        mesh.SetRefinementFlag(el, elerr[el.nr] > 0.25*maxerr)
    print("# ", i, "number of elements refined. total=", i_nr)

with TaskManager():
    i = 0
    while fes.ndof < 100000:
        print(i)
        i += 1
        if i == 20:
            break
        SolveBVP(i)
        CalcError()
        print("before", comm.rank)
        mesh.Refine()
        print("after", comm.rank)
        print()
        print("# num elements after refinement =", len(list(mesh.Elements())))
        vtk = ng.VTKOutput(
            ma=mesh,
            coefs=[],
            names=[],
            filename=f"mesh-{i}",
            subdivision=0)
        vtk.Do()

import matplotlib.pyplot as plt
plt.yscale('log')
plt.xscale('log')
plt.xlabel("ndof")
plt.ylabel("H1 error-estimate")
ndof, err = zip(*l)
plt.plot(ndof, err, "-*")
plt.ion()
plt.savefig("hei.pdf")

Last edit: 1 year 4 months ago by sigmunh.