MFIX  2016-1
comp_mean_fields0.f
!vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv!
!^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^!
      SUBROUTINE comp_mean_fields0

! Modules
      USE bc
      USE compar
      USE constant
      USE cutcell
      USE derived_types, only: pic
      USE desmpi
      USE discretelement
      USE drag
      USE fldvar
      USE functions, only: fluid_at
      USE functions, only: funijk
      USE functions, only: is_on_mype_wobnd
      USE geometry
      USE indices
      USE interpolation
      USE mfix_pic
      USE mpi_utility
      USE parallel
      USE param
      USE param1
      USE physprop, only: mmax
      USE run, only: solids_model
      USE sendrecv

      IMPLICIT NONE
! Local variables
!---------------------------------------------------------------------//
! general i, j, k indices
      INTEGER :: I, J, K, IJK, &
                 II, JJ, KK
      INTEGER :: I1, I2, J1, J2, K1, K2
      INTEGER :: IDIM, IJK2
      INTEGER :: CUR_IJK
! indices used for interpolation stencil (unclear why IE, JN, KTP are
! needed)
      INTEGER :: IW, IE, JS, JN, KB, KTP
! i, j, k indices of the fluid cell the particle resides in, minus 1
! (i.e., shifted one cell in the west, south, bottom direction)
      INTEGER, DIMENSION(3) :: PCELL
! order of interpolation set in the call to set_interpolation_scheme
! unless it is re/set later through the call to
! set_interpolation_stencil
      INTEGER :: ONEW
! constant whose value depends on the dimension of the system:
! avg_factor=0.25 (in 3D) or 0.5 (in 2D)
      DOUBLE PRECISION :: AVG_FACTOR
! index of the solids phase that particle NP belongs to
      INTEGER :: M
! particle number index, used for looping
      INTEGER :: NP, NINDX
! statistical weight of the particle; equal to one for DEM
      DOUBLE PRECISION :: WTP

      DOUBLE PRECISION :: MASS_SOL1, MASS_SOL2
! sum of mass_sol1 and mass_sol2 across all processors
      DOUBLE PRECISION :: MASS_SOL1_ALL, MASS_SOL2_ALL

      DOUBLE PRECISION :: TEMP1, TEMP2

      DOUBLE PRECISION, DIMENSION(3) :: DES_VEL_DENSITY
      DOUBLE PRECISION :: DES_ROP_DENSITY

      INTEGER :: COUNT_NODES_OUTSIDE, COUNT_NODES_INSIDE, &
                 COUNT_NODES_INSIDE_MAX

      DOUBLE PRECISION :: RESID_ROPS(dimension_m)
      DOUBLE PRECISION :: RESID_VEL(3, dimension_m)
      DOUBLE PRECISION :: NORM_FACTOR

! Handan Liu added on Jan 17 2013
      DOUBLE PRECISION, DIMENSION(2,2,2,3) :: gst_tmp
      DOUBLE PRECISION, DIMENSION(2,2,2) :: weight_ft

! total number of 'solids' phases in the simulation
      INTEGER :: MMAX_TOT
!......................................................................!

! initializing
      mass_sol1 = zero
      mass_sol2 = zero
      mass_sol1_all = zero
      mass_sol2_all = zero
! avg_factor=0.25 (in 3D) or 0.5 (in 2D)
      avg_factor = merge(0.5d0, 0.25d0, no_k)

! cartesian_grid related quantities
      count_nodes_inside_max = merge(4, 8, no_k)

      mmax_tot = des_mmax + mmax
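! Solids field arrays are indexed by phase: the continuum (TFM) phases
! occupy 1:mmax and the discrete phases occupy mmax+1:mmax_tot.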
! initialize only the information related to the discrete 'phases' of
! these continuous variables
      rop_s(:,mmax+1:mmax_tot) = zero
      u_s(:,mmax+1:mmax_tot) = zero
      v_s(:,mmax+1:mmax_tot) = zero
      IF(do_k) w_s(:,mmax+1:mmax_tot) = zero
! these are not 'continuous' variables but are used only within this
! routine...
      des_vel_node = zero
      des_rops_node = zero

! sets several quantities including interp_scheme, scheme, and
! order and allocates arrays necessary for interpolation
      CALL set_interpolation_scheme(2)

!$omp parallel default(shared) &
!$omp private(IJK, I, J, K, PCELL, IW, IE, JS, JN, KB, KTP, ONEW, &
!$omp         GST_TMP, COUNT_NODES_INSIDE, II, JJ, KK, CUR_IJK, &
!$omp         NINDX, NP, WTP, M, WEIGHT_FT, I1, I2, J1, J2, K1, K2, &
!$omp         IDIM, IJK2, NORM_FACTOR, RESID_ROPS, RESID_VEL, &
!$omp         COUNT_NODES_OUTSIDE, TEMP1, TEMP2)
!$omp do reduction(+:MASS_SOL1) reduction(+:DES_ROPS_NODE,DES_VEL_NODE)
      DO ijk = ijkstart3, ijkend3

! Cycle this cell if it is not in the fluid domain or if it contains no
! particle/parcel
         IF(.NOT.fluid_at(ijk)) cycle
         IF(pinc(ijk) == 0) cycle

         pcell(1) = i_of(ijk)-1
         pcell(2) = j_of(ijk)-1
         pcell(3) = merge(k_of(ijk)-1, 1, do_k)

! set up the stencil based on the order of interpolation, factoring in
! whether the system has any periodic boundaries; sets onew to order.
         CALL set_interpolation_stencil(pcell, iw, ie, js, jn, kb, ktp,&
            interp_scheme, dimn, ordernew=onew)
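! iw, js, kb are the west/south/bottom node indices of the stencil;
! onew is the interpolation order actually used by the loops below.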

         count_nodes_outside = 0
! Computing/setting the geometric stencil
         DO k = 1, merge(1, onew, no_k)
            DO j = 1, onew
               DO i = 1, onew
                  ii = iw + i-1
                  jj = js + j-1
                  kk = kb + k-1
                  cur_ijk = funijk_map_c(ii,jj,kk)
                  gst_tmp(i,j,k,1) = xe(ii)
                  gst_tmp(i,j,k,2) = yn(jj)
                  gst_tmp(i,j,k,3) = merge(dz(1), zt(kk), no_k)
                  IF(cartesian_grid) THEN
                     IF(scalar_node_atwall(cur_ijk)) &
                        count_nodes_outside = count_nodes_outside + 1
                  ENDIF
               ENDDO
            ENDDO
         ENDDO
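! gst_tmp(i,j,k,1:3) now holds the node coordinates of the stencil:
! xe and yn (and zt in 3D) are the east/north/top face positions of
! the corresponding cells; in 2D the third component is simply dz(1).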


! Calculate des_rops_node so rop_s, and in turn, ep_g can be updated
!----------------------------------------------------------------->>>

! looping through particles in the cell
         DO nindx = 1, pinc(ijk)
            np = pic(ijk)%P(nindx)
            CALL drag_weightfactor(gst_tmp, des_pos_new(np,:), weight_ft)
            m = pijk(np,5)
            wtp = one

            IF(mppic) wtp = des_stat_wt(np)

            mass_sol1 = mass_sol1 + pmass(np)*wtp
            temp2 = pmass(np)*wtp
            DO k = 1, merge(1, onew, no_k)
               DO j = 1, onew
                  DO i = 1, onew
! shift loop index to new variables for manipulation
                     ii = iw + i-1
                     jj = js + j-1
                     kk = kb + k-1
                     cur_ijk = funijk_map_c(ii,jj,kk)
                     temp1 = weight_ft(i,j,k)*temp2

                     des_rops_node(cur_ijk,m) = des_rops_node(cur_ijk,m) + &
                        temp1
                     des_vel_node(cur_ijk,:,m) = des_vel_node(cur_ijk,:,m) + &
                        temp1*des_vel_new(np,:)
                  ENDDO
               ENDDO
            ENDDO
         ENDDO   ! end do (nindx=1,pinc(ijk))
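! Because the weights from drag_weightfactor sum to one over the
! stencil, each particle deposits exactly pmass(np)*wtp onto the nodes,
! so the total Lagrangian mass is preserved by this scatter step.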
!-----------------------------------------------------------------<<<


! Only for cutcell cases may count_nodes_inside become less than its
! originally set value. In such an event, the contribution of scalar
! nodes that do not reside in the domain is added to a residual array.
! This array is then redistributed equally to the nodes that are in the
! fluid domain. These steps are done to conserve mass.
!----------------------------------------------------------------->>>
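! For example, in 2D (count_nodes_inside_max = 4), if one of the four
! stencil nodes lies at a wall, count_nodes_inside = 3: the mass that
! landed on the wall node is summed into resid_rops, and one third of
! it is then added back to each of the three interior nodes.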
! initializing
         resid_rops = zero
         resid_vel = zero
         IF (cartesian_grid) THEN

! only for cartesian_grid will count_nodes_outside be modified from zero
            count_nodes_inside = count_nodes_inside_max - &
               count_nodes_outside

            IF(count_nodes_inside .LT. count_nodes_inside_max) THEN

! Convention used to number the nodes of a scalar cell:
!
!  i=1, j=2  _____________________  i=2, j=2
!           |                     |
!           |     I = 2, J = 2    |
!           |_____________________|
!  i=1, j=1                         i=2, j=1
!
! setting indices based on convention
               i = i_of(ijk)
               j = j_of(ijk)
               k = k_of(ijk)
               i1 = i-1
               i2 = i
               j1 = j-1
               j2 = j
               k1 = merge(k, k-1, no_k)
               k2 = k

! first calculate the residual des_rops_node and des_vel_node that were
! computed on nodes that do not belong to the domain
               DO kk = k1, k2
                  DO jj = j1, j2
                     DO ii = i1, i2
                        ijk2 = funijk(ii, jj, kk)
                        IF(scalar_node_atwall(ijk2)) THEN
                           DO m = mmax+1, mmax_tot
                              resid_rops(m) = resid_rops(m) + &
                                 des_rops_node(ijk2,m)
                              des_rops_node(ijk2,m) = zero

                              DO idim = 1, merge(2,3,no_k)
                                 resid_vel(idim,m) = resid_vel(idim,m) + &
                                    des_vel_node(ijk2,idim,m)
                                 des_vel_node(ijk2,idim,m) = zero
                              ENDDO
                           ENDDO
                        ENDIF
                     ENDDO
                  ENDDO
               ENDDO

! now add this residual equally to the remaining nodes
               norm_factor = one/REAL(count_nodes_inside)
               DO kk = k1, k2
                  DO jj = j1, j2
                     DO ii = i1, i2
                        ijk2 = funijk(ii, jj, kk)
                        IF(.NOT.scalar_node_atwall(ijk2)) THEN
                           DO m = mmax+1, mmax_tot
                              des_rops_node(ijk2,m) = &
                                 des_rops_node(ijk2,m) + &
                                 resid_rops(m)*norm_factor
                              DO idim = 1, merge(2,3,no_k)
                                 des_vel_node(ijk2,idim,m) = &
                                    des_vel_node(ijk2,idim,m) + &
                                    resid_vel(idim,m)*norm_factor
                              ENDDO
                           ENDDO
                        ENDIF
                     ENDDO
                  ENDDO
               ENDDO

            ENDIF
         ENDIF   ! end if (cartesian_grid)
      ENDDO
!$omp end parallel


! At the interface des_rops_node has to be added since particles
! across the processors will contribute to the same scalar node.
! sendrecv will be called and the node values will be added
! at the junction. des_rops_node is altered by the routine when
! periodic boundaries are invoked.
      CALL des_addnodevalues_mean_fields


! Now go from node to scalar center. Same convention as sketched
! earlier.
!----------------------------------------------------------------->>>
! Explanation by RG: 08/17/2012
! The approach used here is general enough to include cutcells as
! well. The new changes do not alter the earlier calculations but
! generalize the technique to cover cartesian grid (cut-cell)
! simulations.
! Previously, the volume of the node (array des_vol_node) was used to
! first scale the nodal values. Subsequently, these nodal values were
! equally added to compute the cell centered values for the scalar
! cell.

! Consider an internal node next to an edge node (a node adjacent to a
! boundary). In 2D, the volume of an edge node will be half that of an
! internal node, so an edge node will contribute double compared to an
! internal node to the value of the scalar cell they share. These
! calculations were previously accomplished via the variable volume of
! the node. Now this is accomplished by the ratio vol(ijk2)/vol_surr,
! where vol(ijk2) is the volume of the scalar cell in consideration
! and vol_surr is the sum of the volumes of all the scalar cells that
! share this node.

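! For example, in 2D with uniform cells of volume V: an interior node
! is shared by 4 cells, so vol_surr = 4V and each cell receives
! V/(4V) = 1/4 of the nodal value, whereas a node on a boundary edge
! is shared by only 2 fluid cells, so vol_surr = 2V and each receives
! 1/2, reproducing the doubled edge contribution described above.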
!$omp parallel do default(none) collapse(3) &
!$omp shared(KSTART2, KEND1, JSTART2, JEND1, ISTART2, IEND1, DO_K, &
!$omp        VOL, DEAD_CELL_AT, FUNIJK_MAP_C, VOL_SURR, MMAX_TOT, &
!$omp        MMAX, DES_ROPS_NODE, DES_VEL_NODE) &
!$omp private(I, J, K, IJK, M, II, JJ, KK, IJK2, DES_ROP_DENSITY, &
!$omp         DES_VEL_DENSITY) &
!$omp reduction(+:ROP_S, U_S, V_S, W_S)
      DO k = kstart2, kend1
         DO j = jstart2, jend1
            DO i = istart2, iend1
               IF (dead_cell_at(i,j,k)) cycle   ! skip dead cells
               ijk = funijk(i,j,k)
! skip nodes whose surrounding stencil contains no fluid cell
               IF (vol_surr(ijk) .eq. zero) cycle

! looping over stencil points (NODE VALUES)
               DO m = mmax+1, mmax_tot
                  des_rop_density = des_rops_node(ijk,m)/vol_surr(ijk)
                  des_vel_density(:) = des_vel_node(ijk,:,m)/vol_surr(ijk)

                  DO kk = k, merge(k+1, k, do_k)
                     DO jj = j, j+1
                        DO ii = i, i+1
                           IF (dead_cell_at(ii,jj,kk)) cycle   ! skip dead cells

                           ijk2 = funijk_map_c(ii, jj, kk)
                           IF(fluid_at(ijk2) .AND. is_on_mype_wobnd(ii,jj,kk)) THEN
! The data in the ghost cells is spurious anyway and is overwritten
! during subsequent send-receives, so no value is computed there; doing
! so would corrupt the total mass computed below to verify mass
! conservation between the Lagrangian and continuum representations.
                              rop_s(ijk2,m) = rop_s(ijk2,m) + des_rop_density*vol(ijk2)
                              u_s(ijk2,m) = u_s(ijk2,m) + des_vel_density(1)*vol(ijk2)
                              v_s(ijk2,m) = v_s(ijk2,m) + des_vel_density(2)*vol(ijk2)
                              IF(do_k) w_s(ijk2,m) = w_s(ijk2,m) + des_vel_density(3)*vol(ijk2)
                           ENDIF
                        ENDDO   ! end do (ii=i,i+1)
                     ENDDO   ! end do (jj=j,j+1)
                  ENDDO   ! end do (kk=k,k+1)
               ENDDO
            ENDDO   ! end do (i=istart2,iend1)
         ENDDO   ! end do (j=jstart2,jend1)
      ENDDO   ! end do (k=kstart2,kend1)
!$omp end parallel do
!-----------------------------------------------------------------<<<
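
! At this point rop_s holds the accumulated solids mass in each cell
! and u_s, v_s, w_s hold the accumulated (mass-weighted) solids
! momentum; the loop below normalizes them into bulk densities and
! mass-averaged velocities.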

!$omp parallel do default(none) private(IJK, M) &
!$omp shared(IJKSTART3, IJKEND3, DO_K, MMAX_TOT, MMAX, ROP_S, U_S, &
!$omp        V_S, W_S, VOL)
      DO ijk = ijkstart3, ijkend3
         IF(.NOT.fluid_at(ijk)) cycle
         DO m = mmax+1, mmax_tot
            IF(rop_s(ijk,m) .GT. zero) THEN
               u_s(ijk,m) = u_s(ijk,m)/rop_s(ijk,m)
               v_s(ijk,m) = v_s(ijk,m)/rop_s(ijk,m)
               IF(do_k) w_s(ijk,m) = w_s(ijk,m)/rop_s(ijk,m)
! Divide by the scalar cell volume to obtain the bulk density
               rop_s(ijk,m) = rop_s(ijk,m)/vol(ijk)
            ENDIF
         ENDDO   ! end loop over m=mmax+1,mmax_tot
      ENDDO   ! end loop over ijk=ijkstart3,ijkend3
!$omp end parallel do


      IF (mppic) CALL send_recv(rop_s,2)
      CALL calc_epg_des

      IF(mppic) THEN
! Now calculate Eulerian mean velocity fields for the discrete phases
         CALL send_recv(u_s,2)
         CALL send_recv(v_s,2)
         IF(do_k) CALL send_recv(w_s,2)

! The Eulerian velocity field is used to set up the stencil to
! interpolate the mean solids velocity at the parcel's location. U_S
! could also have been used, but that too would have required
! communication at this stage. The final interpolated value does not
! change whether the stencil is formed by first obtaining face-centered
! Eulerian velocities (U_S, etc.) and then computing the node
! velocities from them, or by directly computing the node velocities
! from the cell-centered average velocity field. We use the first
! approach, as it is more natural to set BC's on the solids velocity
! field in the face-centered representation (U_S, etc.)
         IF(.NOT.cartesian_grid) THEN
            CALL mppic_comp_eulerian_vels_non_cg
         ELSE
            CALL mppic_comp_eulerian_vels_cg
         ENDIF
      ENDIF   ! end if (mppic)

! Enable DES_REPORT_MASS_INTERP to check that mass is conserved between
! the discrete and continuum representations; it should be set to
! false for any production runs.
      IF(des_report_mass_interp) THEN
         DO ijk = ijkstart3, ijkend3
            IF(.NOT.fluid_at(ijk)) cycle
            i = i_of(ijk)
            j = j_of(ijk)
            k = k_of(ijk)
! It is important to check both FLUID_AT and IS_ON_MYPE_WOBND.
            IF(is_on_mype_wobnd(i,j,k)) THEN
               DO m = mmax+1, mmax_tot
                  mass_sol2 = mass_sol2 + &
                     rop_s(ijk,m)*vol(ijk)
               ENDDO
            ENDIF
         ENDDO
         CALL global_sum(mass_sol1, mass_sol1_all)
         CALL global_sum(mass_sol2, mass_sol2_all)
         IF(mype .eq. pe_io) THEN
            WRITE(*,'(/,5x,A,4(2x,g17.8),/)') &
               'SOLIDS MASS DISCRETE AND CONTINUUM = ', &
               mass_sol1_all, mass_sol2_all
         ENDIF
      ENDIF

      END SUBROUTINE comp_mean_fields0



!vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvC
!  Subroutine: DRAG_WEIGHTFACTOR                                        C
!  Purpose: DES - Calculate the interpolation weights at the            C
!           particle's location. Replaces the 'interpolator'            C
!           interface for the OpenMP implementation.                    C
!                                                                       C
!^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^C

      SUBROUTINE drag_weightfactor(GSTEN, DESPOS, WEIGHTFACTOR)

      USE geometry, only: no_k

      IMPLICIT NONE

!-----------------------------------------------
! Dummy arguments and local variables
!-----------------------------------------------
      DOUBLE PRECISION, DIMENSION(2,2,2,3), INTENT(IN) :: GSTEN
      DOUBLE PRECISION, DIMENSION(3), INTENT(IN) :: DESPOS
      DOUBLE PRECISION, DIMENSION(2,2,2), INTENT(OUT) :: WEIGHTFACTOR
      INTEGER :: II, JJ, KK

      DOUBLE PRECISION, DIMENSION(2) :: XXVAL, YYVAL, ZZVAL
      DOUBLE PRECISION :: DXX, DYY, DZZ
      DOUBLE PRECISION, DIMENSION(3) :: ZETAA

      dxx = gsten(2,1,1,1) - gsten(1,1,1,1)
      dyy = gsten(1,2,1,2) - gsten(1,1,1,2)

      zetaa(1:2) = despos(1:2) - gsten(1,1,1,1:2)

      zetaa(1) = zetaa(1)/dxx
      zetaa(2) = zetaa(2)/dyy

      xxval(1) = 1-zetaa(1)
      yyval(1) = 1-zetaa(2)
      xxval(2) = zetaa(1)
      yyval(2) = zetaa(2)
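
! xxval/yyval (and zzval in 3D) are the 1D linear basis values in each
! direction; e.g., for zetaa = (0.25, 0.5) the four 2D weights are
! 0.375, 0.125, 0.375 and 0.125, which sum to one.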

      IF(no_k) THEN
         DO jj = 1, 2
            DO ii = 1, 2
               weightfactor(ii,jj,1) = xxval(ii)*yyval(jj)
            ENDDO
         ENDDO
      ELSE
         dzz = gsten(1,1,2,3) - gsten(1,1,1,3)
         zetaa(3) = despos(3) - gsten(1,1,1,3)
         zetaa(3) = zetaa(3)/dzz
         zzval(1) = 1-zetaa(3)
         zzval(2) = zetaa(3)
         DO kk = 1, 2
            DO jj = 1, 2
               DO ii = 1, 2
                  weightfactor(ii,jj,kk) = xxval(ii)*yyval(jj)*zzval(kk)
               ENDDO
            ENDDO
         ENDDO
      ENDIF

      END SUBROUTINE drag_weightfactor
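

The weight factors above are plain bilinear (2D) or trilinear (3D) interpolation weights. As a self-contained illustration (a sketch only, not part of MFIX; all names in it are made up), the program below reimplements the 3D branch for a single unit stencil cell and verifies that the eight weights sum to one, which is what guarantees that the scatter step in comp_mean_fields0 conserves mass:

      PROGRAM demo_trilinear_weights
! Illustrative sketch: trilinear weights for a hypothetical particle
! at normalized offsets zeta within a unit stencil cell.
      IMPLICIT NONE
      DOUBLE PRECISION :: zeta(3), xv(2), yv(2), zv(2)
      DOUBLE PRECISION :: w(2,2,2), wsum
      INTEGER :: ii, jj, kk

! made-up particle position: 25%, 50%, 75% across the cell
      zeta = (/0.25d0, 0.5d0, 0.75d0/)
! 1D linear basis values in each direction (as in drag_weightfactor)
      xv = (/1d0-zeta(1), zeta(1)/)
      yv = (/1d0-zeta(2), zeta(2)/)
      zv = (/1d0-zeta(3), zeta(3)/)

      wsum = 0d0
      DO kk = 1, 2
         DO jj = 1, 2
            DO ii = 1, 2
               w(ii,jj,kk) = xv(ii)*yv(jj)*zv(kk)
               wsum = wsum + w(ii,jj,kk)
            ENDDO
         ENDDO
      ENDDO

! prints 1.0: the particle's mass is exactly partitioned among the
! eight stencil nodes
      WRITE(*,*) 'sum of weights =', wsum
      END PROGRAM demo_trilinear_weights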