
Commit 8671798

Merge pull request #87 from laetitia-m/feature/ls-skeleton-centralized
Feature/ls skeleton centralized
2 parents dec227b + e603016

7 files changed: +178 -29 lines changed

cmake/testing/pmmg_tests.cmake

Lines changed: 46 additions & 6 deletions
@@ -316,18 +316,58 @@ IF( BUILD_TESTING )
   ##### Test isovalue mode - ls discretization
   #####
   ###############################################################################
+  # The following tests were to test the implementation of the ls option
+  # Not relevant anymore
+  # foreach( NP 1 2 4 8 )
+  #   add_test( NAME ls-arg-option-${NP}
+  #     COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $<TARGET_FILE:${PROJECT_NAME}>
+  #     ${CI_DIR}/LevelSet/3D-cube.mesh
+  #     -ls 0.01
+  #     -sol ${CI_DIR}/LevelSet/3D-cube-ls.sol
+  #     -out ${CI_DIR_RESULTS}/${MESH}-${NP}.o.mesh)
+  #   set(lsNotImplemented "## Error: level-set discretisation unavailable")
+  #   set_property(TEST ls-arg-option-${NP}
+  #     PROPERTY PASS_REGULAR_EXPRESSION "${lsNotImplemented}")
+  # endforeach()
+
+  #--------------------------------
+  #--- CENTRALIZED INPUT (CenIn)
+  #--------------------------------
+  # Tests of ls discretization for centralized mesh input
   foreach( NP 1 2 4 8 )
-    add_test( NAME ls-arg-option-${NP}
+    add_test( NAME ls-CenIn-${NP}
       COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $<TARGET_FILE:${PROJECT_NAME}>
       ${CI_DIR}/LevelSet/3D-cube.mesh
-      -ls 0.01
+      -ls 0.0
       -sol ${CI_DIR}/LevelSet/3D-cube-ls.sol
-      -out ${CI_DIR_RESULTS}/${MESH}-${NP}.o.mesh)
-    set(lsNotImplemented "## Error: level-set discretisation unavailable")
-    set_property(TEST ls-arg-option-${NP}
-      PROPERTY PASS_REGULAR_EXPRESSION "${lsNotImplemented}")
+      -out ${CI_DIR_RESULTS}/3D-cube-ls-CenIn-${NP}.o.mesh)
   endforeach()
 
+  # Tests for ls + met for centralized mesh input
+  foreach( NP 1 2 4 8 )
+    add_test( NAME ls-CenIn-met-${NP}
+      COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $<TARGET_FILE:${PROJECT_NAME}>
+      ${CI_DIR}/LevelSet/3D-cube.mesh
+      -ls 0.0
+      -sol ${CI_DIR}/LevelSet/3D-cube-ls.sol
+      -met ${CI_DIR}/LevelSet/3D-cube-metric.sol
+      -out ${CI_DIR_RESULTS}/3D-cube-ls-CenIn-met-${NP}.o.mesh)
+  endforeach()
+
+  # Tests of pvtu output when ls mode
+  foreach( NP 1 2 4 8 )
+    add_test( NAME ls-CenIn-DisOut-${NP}
+      COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $<TARGET_FILE:${PROJECT_NAME}>
+      ${CI_DIR}/LevelSet/3D-cube.mesh
+      -ls 0.0
+      -sol ${CI_DIR}/LevelSet/3D-cube-ls.sol
+      -out ${CI_DIR_RESULTS}/3D-cube-ls-CenIn-DisOut-${NP}.o.pvtu)
+  endforeach()
+
+  #--------------------------------
+  #--- DISTRIBUTED INPUT (DisIn)
+  #--------------------------------
+
 
 ###############################################################################
 #####
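Note: each test above expands to a plain mpiexec invocation of the parmmg executable. With NP=4, for example, ls-CenIn-4 runs along the lines of

  mpiexec -np 4 <parmmg-binary> <CI_DIR>/LevelSet/3D-cube.mesh -ls 0.0 -sol <CI_DIR>/LevelSet/3D-cube-ls.sol -out <CI_DIR_RESULTS>/3D-cube-ls-CenIn-4.o.mesh

where the binary name, MPI launcher flags and CI paths are resolved at configure time ($<TARGET_FILE:${PROJECT_NAME}>, ${MPI_ARGS}, ${CI_DIR}, ...) and vary per machine. Unlike the retired ls-arg-option tests, which passed only when the "level-set discretisation unavailable" error was printed, the new tests carry no PASS_REGULAR_EXPRESSION property, so they pass on a clean exit status.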

src/API_functionsf_pmmg.c

Lines changed: 10 additions & 0 deletions
@@ -1051,6 +1051,16 @@ FORTRAN_NAME(PMMG_PARMMGLIB_CENTRALIZED,pmmg_parmmglib_centralized,
   return;
 }
 
+/**
+ * See \ref PMMG_parmmgls_centralized function in \ref libparmmg.h file.
+ */
+FORTRAN_NAME(PMMG_PARMMGLS_CENTRALIZED,pmmg_parmmgls_centralized,
+             (PMMG_pParMesh *parmesh,int* retval),
+             (parmesh,retval)) {
+  *retval = PMMG_parmmgls_centralized(*parmesh);
+  return;
+}
+
 /**
  * See \ref PMMG_loadMesh function in \ref libparmmg.h file.
  */
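For readers unfamiliar with Mmg's FORTRAN_NAME convention: the macro emits the same wrapper body under the several name manglings Fortran compilers expect. Simplified to a single mangling (the exact expansion lives in the Mmg headers and is an assumption here), the new wrapper behaves roughly like:

/* Simplified illustration only: the real FORTRAN_NAME macro generates this
 * wrapper under several mangled names (PMMG_PARMMGLS_CENTRALIZED,
 * pmmg_parmmgls_centralized, pmmg_parmmgls_centralized_,
 * pmmg_parmmgls_centralized__). */
void pmmg_parmmgls_centralized_(PMMG_pParMesh *parmesh, int *retval) {
  /* Fortran passes arguments by reference: dereference the parmesh handle
   * and hand the integer status back through retval. */
  *retval = PMMG_parmmgls_centralized(*parmesh);
}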

src/libparmmg.c

Lines changed: 53 additions & 10 deletions
@@ -55,7 +55,7 @@ extern int (*PMMG_interp2bar)(MMG5_pMesh mesh,MMG5_pSol met,MMG5_pSol oldMet,MMG
 int PMMG_check_inputData(PMMG_pParMesh parmesh)
 {
   MMG5_pMesh mesh;
-  MMG5_pSol  met;
+  MMG5_pSol  met,ls;
   int        k;
 
   if ( parmesh->info.imprim > PMMG_VERB_VERSION )
@@ -71,14 +71,14 @@ int PMMG_check_inputData(PMMG_pParMesh parmesh)
             " ## Error: lagrangian mode unavailable (MMG3D_IPARAM_lag):\n");
     return 0;
   } else if ( mesh->info.iso ) {
-    fprintf(stderr," ## Error: level-set discretisation unavailable"
-            " (MMG3D_IPARAM_iso):\n");
-    return 0;
+    fprintf(stderr,"\n\n ## WARNING: level-set discretisation under construction. \n\n");
+    // return 0;
   } else if ( mesh->info.optimLES && met->size==6 ) {
     fprintf(stdout," ## Error: strong mesh optimization for LES methods"
             " unavailable (MMG3D_IPARAM_optimLES) with an anisotropic metric.\n");
     return 0;
   }
+
   /* specific meshing */
   if ( met->np ) {
     if ( mesh->info.optim ) {
@@ -128,18 +128,23 @@ int PMMG_check_inputData(PMMG_pParMesh parmesh)
 int PMMG_preprocessMesh( PMMG_pParMesh parmesh )
 {
   MMG5_pMesh mesh;
-  MMG5_pSol  met;
+  MMG5_pSol  met,ls;
+  int8_t     tim;
+  char       stim[32];
+  mytime     ctim[TIMEMAX];
 
   mesh = parmesh->listgrp[0].mesh;
   met  = parmesh->listgrp[0].met;
+  ls   = parmesh->listgrp[0].ls;
 
   assert ( ( mesh != NULL ) && ( met != NULL ) && "Preprocessing empty args");
 
   /** Function setters (must be assigned before quality computation) */
   MMG3D_Set_commonFunc();
 
   /** Mesh scaling and quality histogram */
-  if ( !MMG5_scaleMesh(mesh,met,NULL) ) {
+  if ( !MMG5_scaleMesh(mesh,met,ls) ) {
     return PMMG_LOWFAILURE;
   }
 
@@ -177,17 +182,45 @@ int PMMG_preprocessMesh( PMMG_pParMesh parmesh )
     return PMMG_STRONGFAILURE;
   }
 
+  /* Discretization of the isovalue */
+  if (mesh->info.iso) {
+    tim = 1;
+    chrono(ON,&(ctim[tim]));
+    if ( parmesh->info.imprim > PMMG_VERB_VERSION ) {
+      fprintf(stdout,"\n  -- PHASE 1a: ISOVALUE DISCRETIZATION \n");
+    }
+    if ( !MMG3D_mmg3d2(mesh,ls,met) ) {
+      return PMMG_STRONGFAILURE;
+    }
+    chrono(OFF,&(ctim[tim]));
+    printim(ctim[tim].gdif,stim);
+    if ( parmesh->info.imprim > PMMG_VERB_VERSION ) {
+      fprintf(stdout,"  -- PHASE 1a COMPLETED  %s\n",stim);
+    }
+  }
+
   /** Mesh analysis */
   if ( !MMG3D_analys(mesh) ) {
     return PMMG_STRONGFAILURE;
   }
 
+  /* Check if the LS has led to a non-manifold topology */
+  if ( mesh->info.iso && !MMG3D_chkmani(mesh) ) {
+    fprintf(stderr,"\n  ## LS discretization: non-manifold initial topology. Exit program.\n");
+    return PMMG_STRONGFAILURE;
+  }
+  else {
+    if ( parmesh->info.imprim > PMMG_VERB_VERSION ) {
+      fprintf(stdout,"  LS discretization OK: no non-manifold topology.\n");
+    }
+  }
+
   if ( parmesh->info.imprim0 > PMMG_VERB_ITWAVES && (!mesh->info.iso) && met->m ) {
     PMMG_prilen(parmesh,0,1);
   }
 
   /** Mesh unscaling */
-  if ( !MMG5_unscaleMesh(mesh,met,NULL) ) {
+  if ( !MMG5_unscaleMesh(mesh,met,ls) ) {
     return PMMG_STRONGFAILURE;
   }
 
@@ -337,7 +370,7 @@ int PMMG_preprocessMesh_distributed( PMMG_pParMesh parmesh )
 
 int PMMG_distributeMesh_centralized_timers( PMMG_pParMesh parmesh,mytime *ctim ) {
   MMG5_pMesh mesh;
-  MMG5_pSol  met;
+  MMG5_pSol  met,ls;
   int        ier,iresult;
   int8_t     tim;
   char       stim[32];
@@ -378,7 +411,8 @@ int PMMG_distributeMesh_centralized_timers( PMMG_pParMesh parmesh,mytime *ctim )
 
   mesh = parmesh->listgrp[0].mesh;
   met  = parmesh->listgrp[0].met;
-  if ( (ier==PMMG_STRONGFAILURE) && MMG5_unscaleMesh( mesh, met, NULL ) ) {
+  ls   = parmesh->listgrp[0].ls;
+  if ( (ier==PMMG_STRONGFAILURE) && MMG5_unscaleMesh( mesh, met, ls ) ) {
     ier = PMMG_LOWFAILURE;
   }
 
@@ -1580,9 +1614,17 @@ int PMMG_parmmglib_post(PMMG_pParMesh parmesh) {
 }
 
 int PMMG_parmmglib_centralized(PMMG_pParMesh parmesh) {
+  return PMMG_parmmg_centralized(parmesh);
+}
+
+int PMMG_parmmgls_centralized(PMMG_pParMesh parmesh) {
+  return PMMG_parmmg_centralized(parmesh);
+}
+
+int PMMG_parmmg_centralized(PMMG_pParMesh parmesh) {
   PMMG_pGrp    grp;
   MMG5_pMesh   mesh;
-  MMG5_pSol    met;
+  MMG5_pSol    met,ls;
   int          ier;
   int          ierlib;
   mytime       ctim[TIMEMAX];
@@ -1619,6 +1661,7 @@ int PMMG_parmmglib_centralized(PMMG_pParMesh parmesh) {
   grp  = &parmesh->listgrp[0];
   mesh = grp->mesh;
   met  = grp->met;
+  ls   = grp->ls;
 
   /** Remeshing */
   tim = 3;

src/libparmmg.h

Lines changed: 40 additions & 0 deletions
@@ -162,6 +162,46 @@ int PMMG_parmmglib_distributed(PMMG_pParMesh parmesh);
 **/
 int PMMG_parmmglib_centralized(PMMG_pParMesh parmesh);
 
+/**
+ * \param parmesh pointer toward the parmesh structure (boundary entities are
+ * stored into MMG5_Tria, MMG5_Edge... structures)
+ *
+ * \return \ref PMMG_SUCCESS if success, \ref PMMG_LOWFAILURE if fail but we can
+ * return a centralized and unscaled mesh or \ref PMMG_STRONGFAILURE if fail and
+ * we can't return a centralized and unscaled mesh.
+ *
+ * Main program for the parallel isovalue discretisation library for centralized
+ * meshes.
+ *
+ * \remark Fortran interface:
+ * >   SUBROUTINE PMMG_parmmgls_centralized(parmesh,retval)\n
+ * >     MMG5_DATA_PTR_T,INTENT(INOUT) :: parmesh\n
+ * >     INTEGER, INTENT(OUT)          :: retval\n
+ * >   END SUBROUTINE\n
+ *
+ **/
+int PMMG_parmmgls_centralized(PMMG_pParMesh parmesh);
+
+/**
+ * \param parmesh pointer toward the parmesh structure (boundary entities are
+ * stored into MMG5_Tria, MMG5_Edge... structures)
+ *
+ * \return \ref PMMG_SUCCESS if success, \ref PMMG_LOWFAILURE if fail but we can
+ * return a centralized and unscaled mesh or \ref PMMG_STRONGFAILURE if fail and
+ * we can't return a centralized and unscaled mesh.
+ *
+ * Main program for the parallel isovalue discretisation library and remesh
+ * library for centralized meshes.
+ *
+ * \remark Fortran interface:
+ * >   SUBROUTINE PMMG_parmmg_centralized(parmesh,retval)\n
+ * >     MMG5_DATA_PTR_T,INTENT(INOUT) :: parmesh\n
+ * >     INTEGER, INTENT(OUT)          :: retval\n
+ * >   END SUBROUTINE\n
+ *
+ **/
+int PMMG_parmmg_centralized(PMMG_pParMesh parmesh);
+
 /* init file names */
 /**
  * \param parmesh pointer toward a parmesh structure.
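To make the new entry point concrete, here is a minimal usage sketch in C. PMMG_Init_parMesh, PMMG_loadMesh_centralized, PMMG_saveMesh_centralized and PMMG_Free_all are existing ParMmg API calls; the level-set loader named below (PMMG_loadSol_centralized) and the file names are assumptions for illustration, since this PR does not show the I/O path.

#include <mpi.h>
#include "libparmmg.h"

int main(int argc, char **argv) {
  PMMG_pParMesh parmesh = NULL;
  int           ier;

  MPI_Init(&argc, &argv);

  /* Allocate a parmesh working on MPI_COMM_WORLD (3D). */
  PMMG_Init_parMesh(PMMG_ARG_start,
                    PMMG_ARG_ppParMesh, &parmesh,
                    PMMG_ARG_pMesh, PMMG_ARG_pMet,
                    PMMG_ARG_dim, 3,
                    PMMG_ARG_MPIComm, MPI_COMM_WORLD,
                    PMMG_ARG_end);

  /* Centralized input: the whole mesh lives on the root process.
   * File names are placeholders. */
  if ( PMMG_loadMesh_centralized(parmesh, "3D-cube.mesh") != 1 ) return 1;
  /* Assumed loader for the level-set values (one scalar per vertex);
   * the exact helper for the ls solution may differ at this commit. */
  if ( PMMG_loadSol_centralized(parmesh, "3D-cube-ls.sol") != 1 ) return 1;

  /* NB: ls mode must also be enabled on the parmesh (the -ls command-line
   * option does this); the corresponding parameter call is omitted here
   * because this PR does not show it. */

  /* New in this PR: isovalue discretisation + parallel remeshing,
   * starting from a centralized mesh. */
  ier = PMMG_parmmgls_centralized(parmesh);

  if ( ier != PMMG_STRONGFAILURE ) {
    PMMG_saveMesh_centralized(parmesh, "3D-cube.o.mesh");
  }

  PMMG_Free_all(PMMG_ARG_start, PMMG_ARG_ppParMesh, &parmesh, PMMG_ARG_end);
  MPI_Finalize();
  return (ier == PMMG_SUCCESS) ? 0 : 1;
}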

src/libparmmg1.c

Lines changed: 0 additions & 1 deletion
@@ -707,7 +707,6 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh )
 
   /* renumerotation if available: no need to renum the field here (they
    * will be interpolated) */
-  assert ( mesh->npi==mesh->np );
   if ( permNodGlob ) {
     if ( !MMG5_scotchCall(mesh,met,NULL,permNodGlob) )
     {

src/mpiunpack_pmmg.c

Lines changed: 2 additions & 0 deletions
@@ -92,10 +92,12 @@ int PMMG_mpiunpack_meshSizes ( PMMG_pParMesh parmesh,PMMG_pGrp listgrp,int igrp,
   ier_grp = MMG3D_Init_mesh(MMG5_ARG_start,
                             MMG5_ARG_ppMesh,&(grp->mesh),
                             MMG5_ARG_ppMet ,&(grp->met),
+                            MMG5_ARG_ppLs  ,&(grp->ls),
                             MMG5_ARG_end);
 
   mesh = grp->mesh;
   met  = grp->met;
+  ls   = grp->ls;
 
   /* Set maximum memory */
   mesh->memMax = parmesh->memGloMax;

src/parmmg.c

Lines changed: 27 additions & 12 deletions
@@ -275,20 +275,35 @@ int main( int argc, char *argv[] )
     }
   }
   /* In iso mode: read metric if any */
-  if ( grp->mesh->info.iso && parmesh->metin ) {
-    if ( !distributedInput ) {
-      iermesh = PMMG_loadMet_centralized( parmesh, parmesh->metin );
+  if ( grp->mesh->info.iso ) {
+    if ( parmesh->metin ) {
+      if ( !distributedInput ) {
+        iermesh = PMMG_loadMet_centralized( parmesh, parmesh->metin );
+      }
+      else {
+        int ier_loc = PMMG_loadMet_distributed( parmesh, parmesh->metin );
+        MPI_Allreduce( &ier_loc, &iermesh, 1, MPI_INT, MPI_MIN, parmesh->comm);
+      }
+      if ( -1 == iermesh ) {
+        if ( rank == parmesh->info.root ) {
+          fprintf(stderr,"\n  ## ERROR: UNABLE TO LOAD METRIC.\n");
+        }
+        ier = 0;
+        goto check_mesh_loading;
+      }
     }
     else {
-      int ier_loc = PMMG_loadMet_distributed( parmesh, parmesh->metin );
-      MPI_Allreduce( &ier_loc, &iermesh, 1, MPI_INT, MPI_MIN, parmesh->comm);
-    }
-    if ( -1 == iermesh ) {
-      if ( rank == parmesh->info.root ) {
-        fprintf(stderr,"\n  ## ERROR: UNABLE TO LOAD METRIC.\n");
+      /* Give a name to the metric if not provided for distributed metric output */
+      if ( !MMG5_Set_inputSolName(grp->mesh,grp->met,"") ) {
+        fprintf(stdout,"  ## WARNING: Unable to give a name to the metric.\n");
+      }
+      else {
+        ier = PMMG_Set_name(parmesh,&parmesh->metin,grp->met->namein,"mesh.sol");
+        if (!ier) {
+          fprintf(stdout,"  ## ERROR: Unable to give a name to the metric.\n");
+          PMMG_RETURN_AND_FREE( parmesh, PMMG_LOWFAILURE );
+        }
       }
-      ier = 0;
-      goto check_mesh_loading;
     }
   }
 
@@ -343,7 +358,7 @@ int main( int argc, char *argv[] )
   }
   else if ( !distributedInput ) {
     /* Parallel remeshing starting from a centralized mesh */
-    ier = PMMG_parmmglib_centralized(parmesh);
+    ier = PMMG_parmmg_centralized(parmesh);
   }
   else {
     /* Parallel remeshing starting from a distributed mesh */
