!vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvC
!                                                                      C
!  Subroutine: CFASSIGN                                                C
!                                                                      C
!  Purpose: Assign the necessary values for DEM computation, for       C
!  example:                                                            C
!     - assigning DEM boundaries from the values entered for           C
!       MFIX input in mfix.dat                                         C
!     - assigning the DEM gravity vector from the MFIX input           C
!     - calculating DTSOLID based on particle properties: spring       C
!       coefficient, damping factor and mass                           C
!                                                                      C
!  Reviewer: Sreekanth Pannala                        Date: 09-Nov-06  C
!                                                                      C
!  Reviewer: Rahul Garg                               Date: 25-Mar-14  C
!  Comments: This subroutine was split into separate subroutines       C
!  for the DEM and PIC models                                          C
!^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^C

      SUBROUTINE CFASSIGN

!-----------------------------------------------
! Modules
!-----------------------------------------------
      USE param1
      USE constant, only: GRAVITY_X, GRAVITY_Y, GRAVITY_Z
      USE discretelement
      USE mfix_pic
      use geometry, only: DO_K
      use error_manager
! Flag: DEM solids present.
      use run, only: DEM_SOLIDS
! Runtime flag specifying MPPIC solids.
      use run, only: PIC_SOLIDS

      IMPLICIT NONE
!-----------------------------------------------
! Local Variables
!-----------------------------------------------

      CALL INIT_ERR_MSG("CFASSIGN")

! Only the assignments common to DEM and PIC are made in this routine.
! The model-specific assignments are handled by the subroutines below.

! Gravity vector and its magnitude.
      GRAV(1) = GRAVITY_X
      GRAV(2) = GRAVITY_Y
      GRAV(3) = GRAVITY_Z
      GRAV_MAG = sqrt(dot_product(GRAV,GRAV))

      IF(DEM_SOLIDS) CALL CFASSIGN_DEM
      IF(PIC_SOLIDS) CALL CFASSIGN_PIC

! Finalize the error manager.
      CALL FINL_ERR_MSG

      RETURN
      END SUBROUTINE CFASSIGN



!vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvC
!                                                                      C
!  Subroutine: CFASSIGN_PIC                                            C
!                                                                      C
!  Purpose: PIC-specific portion of CFASSIGN, for example:             C
!     - calculating DTSOLID based on the particle response time        C
!                                                                      C
!  Reviewer: Rahul Garg                               Date: 25-Mar-14  C
!^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^C

      SUBROUTINE CFASSIGN_PIC

!-----------------------------------------------
! Modules
!-----------------------------------------------
      USE param1
      USE constant, only: Pi
      USE discretelement, only: des_mmax, dtsolid
      USE discretelement, only: DES_RO_s, DES_D_P0
      USE physprop, only: MU_g0
      USE mfix_pic, only: dtpic_taup, des_tau_p
      use error_manager
      IMPLICIT NONE
!-----------------------------------------------
! Local Variables
!-----------------------------------------------
! Solids phase index.
      INTEGER :: M

      CALL INIT_ERR_MSG("CFASSIGN_PIC")

! Particle (Stokes) response time for each solids phase:
! tau_p = rho_s*d_p**2/(18*mu_g)
      DO M = 1, DES_MMAX
         DES_TAU_P(M) = DES_RO_S(M)*(DES_D_P0(M)**2.d0)/(18.d0*MU_g0)
         WRITE(err_msg,'(/A,I2,A,G17.8)') &
            'TAU_P FOR SOLIDS PHASE ', M, ' = ', DES_TAU_P(M)
         CALL FLUSH_ERR_MSG(Header = .false., Footer = .false.)
      ENDDO

! The solids time step is set by the smallest response time; this is
! also the maximum dt for point particles based on tau_p.
      DTSOLID = MINVAL(DES_TAU_P(1:DES_MMAX))
      DTPIC_TAUP = DTSOLID

      WRITE(ERR_MSG, 1000) DTSOLID
      CALL FLUSH_ERR_MSG(Header = .false.)

 1000 FORMAT('MPPIC: Point-particle ',&
         'approximation for particle-particle and ', /, &
         'particle-wall collisions', /, &
         'DTSOLID based on the particle response time Taup', /, &
         'DTSOLID = ', 2x, E17.10)

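! Illustrative check of the response time above (the values here are
! assumptions for illustration only, not read from the input deck):
! with DES_RO_s = 2500 kg/m^3, DES_D_P0 = 1.0d-4 m and MU_g0 = 1.8d-5
! Pa.s, tau_p = 2500*(1.0d-4)**2/(18*1.8d-5) ~ 7.7d-2 s. The smallest
! such tau_p over all solids phases becomes DTSOLID (and DTPIC_TAUP).
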
! Finalize the error manager.
      CALL FINL_ERR_MSG

      END SUBROUTINE CFASSIGN_PIC



!vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvC
!                                                                      C
!  Subroutine: CFASSIGN_DEM                                            C
!                                                                      C
!  Purpose: DEM-specific portion of CFASSIGN, for example:             C
!     - calculating DTSOLID based on particle properties: spring       C
!       coefficient, damping factor and mass                           C
!                                                                      C
!  Reviewer: Rahul Garg                               Date: 25-Mar-14  C
!^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^C

      SUBROUTINE CFASSIGN_DEM

!-----------------------------------------------
! Modules
!-----------------------------------------------
      USE param1
      USE constant, only: Pi
      USE discretelement
      use error_manager
      IMPLICIT NONE
!-----------------------------------------------
! Local Variables
!-----------------------------------------------

      CALL INIT_ERR_MSG("CFASSIGN_DEM")

!      WRITE(ERR_MSG,'(A)') 'Setting collision model parameters for DEM'
!      CALL FLUSH_ERR_MSG(Footer = .false.)

! Finalize the error manager.
      CALL FINL_ERR_MSG

      END SUBROUTINE CFASSIGN_DEM

!vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvC
!  Subroutine: compute_volume_of_nodes                                 C
!  Author: Rahul Garg                              Date: Dec 20, 2011  C
!                                                                      C
!  Purpose: Due to the staggered grid, the interpolation of mean       C
!  fields is always first done at the nodes (of the scalar cells)      C
!  for any quantity. For a quantity like the drag force or ep_s,       C
!  a geometric volume must be assigned to each node. In the past       C
!  this was done on-the-fly in drag_fgs.f using the variable VCELL,    C
!  which was incorrectly set to the volume of the scalar cell that     C
!  the particle belongs to. That is acceptable for a uniform grid      C
!  but does not hold for non-uniform grids. In addition, at the        C
!  domain edges (in 2-D) the node volume is only half of the           C
!  standard cell volume, and at a corner (in 2-D) it is only one       C
!  fourth. This correction was also applied on-the-fly, but again      C
!  using the cell volume, which was not correct and also not           C
!  extendable to cut cells. So this routine computes the geometric     C
!  volume of the nodes.                                                C
!                                                                      C
!  des_vol_node_ratio was defined in order to develop an estimation    C
!  algorithm for computing mean fields when the Cartesian grid is      C
!  on. Subsequently, another algorithm was developed that negated      C
!  the need for des_vol_node_ratio. However, this variable is kept     C
!  for now (RG: July 27, 2012).                                        C
!^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^C

      SUBROUTINE compute_volume_of_nodes

!-----------------------------------------------
! Modules
!-----------------------------------------------
      USE param
      USE param1
      USE parallel
      USE physprop
      USE fldvar
      USE run
      USE geometry
      USE indices
      USE bc
      USE compar
      USE sendrecv
      USE discretelement
      USE cutcell
      USE functions
      implicit none
!-----------------------------------------------
! Local Variables
!-----------------------------------------------
! ijk index of the fluid grid and the corresponding i, j, k indices
      integer :: ijk, iraw, jraw, kraw
! i, j, k and (i+1, j+1, k+1) indices corrected for any cyclic ghost
! boundaries on the fluid grid
      INTEGER :: I, J, K, ip, jp, kp
      integer :: ipjk, ijpk, ipjpk, ijkp, ipjkp, ijpkp, ipjpkp
! volume of the indicated cell
      double precision :: vol_ijk, vol_ipjk, vol_ijpk, vol_ipjpk
      double precision :: vol_ijkp, vol_ipjkp, vol_ijpkp, vol_ipjpkp
! number of cells surrounding a node and the number of those cells
! that are fluid
      double precision :: vol_node_count, vol_node_actual_count
! weighting factor
      double precision :: avg_factor
! uncorrected node volume (diagnostic only; assigned but not used)
      double precision :: vol_node_uncorr
!-----------------------------------------------

! weight per surrounding cell: 1/4 of each of 4 cells in 2-D and
! 1/8 of each of 8 cells in 3-D
      avg_factor = merge(0.25d0, 0.125d0, NO_K)

! compute the volume at the grid nodes
! grid nodes run from istart2 to iend1
      vol_node_count = merge(4., 8., NO_K)

      DO ijk = ijkstart3, ijkend3
         des_vol_node(ijk) = zero
         iraw = i_of(ijk)
         jraw = j_of(ijk)
         kraw = k_of(ijk)

! Start at 1 (a ghost cell) and go to the last fluid cell. Why start at
! a ghost cell and not a fluid cell? Because we are interested in the
! nodes making up the interpolation stencil, and their numbering runs
! from istart2 to iend1. Think of a case with IMAX = 3: the nodes where
! the interpolation will be done run from 1 (=istart2) to 4 (=iend1).
         IF(iraw.LT.istart2 .OR. iraw.GT.iend1) CYCLE
         IF(jraw.LT.jstart2 .OR. jraw.GT.jend1) CYCLE
         IF(kraw.LT.kstart2 .OR. kraw.GT.kend1) CYCLE

! This forces the indices of ghost cells on cyclic borders back to the
! corresponding fluid cells. Since we are using i, j, k indices and not
! just a composite ijk index, these must be shifted to account for
! periodicity.
         I = imap_c(iraw)
         J = jmap_c(jraw)
         K = kmap_c(kraw)
         IP = imap_c(iraw+1)
         JP = jmap_c(jraw+1)

! Using a function like ip_of(ijk) should work the same as taking
! funijk of the shifted i, j, k indices; however, small differences can
! occur at the edges/corners, so the latter method is retained at this
! time (see J. Galvin for discussion).
         ipjk = funijk(IP,J,K)
         ijpk = funijk(I,JP,K)
         ipjpk = funijk(IP,JP,K)

! The existing variable vol(ijk) is not used here for cut-cell reasons
! (see R. Garg for discussion).
         vol_ijk   = dx(I) *dy(J) *dz(K)
         vol_ipjk  = dx(IP)*dy(J) *dz(K)
         vol_ijpk  = dx(I) *dy(JP)*dz(K)
         vol_ipjpk = dx(IP)*dy(JP)*dz(K)

         vol_node_uncorr = avg_factor*(vol_ijk + vol_ipjk + vol_ijpk + vol_ipjpk)
         vol_node_actual_count = vol_node_count

! Discard the contribution of any non-fluid cell surrounding the node.
         IF(.NOT.FLUID_AT(ijk)) THEN
            vol_node_actual_count = vol_node_actual_count - 1
            vol_ijk = zero
         ENDIF

         IF(.NOT.FLUID_AT(ipjk)) THEN
            vol_node_actual_count = vol_node_actual_count - 1
            vol_ipjk = zero
         ENDIF

         IF(.NOT.FLUID_AT(ijpk)) THEN
            vol_node_actual_count = vol_node_actual_count - 1
            vol_ijpk = zero
         ENDIF

         IF(.NOT.FLUID_AT(ipjpk)) THEN
            vol_node_actual_count = vol_node_actual_count - 1
            vol_ipjpk = zero
         ENDIF

! Note: des_vol_node can be non-zero for non-fluid (ghost) cells along
! the west/south/bottom borders, but stays zero along the east/north/
! top borders, which are excluded by the CYCLE checks above.
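! Illustrative check (assuming a uniform grid): in 2-D, if all four
! cells surrounding the node are fluid, the sum below gives
! 0.25d0*(4*dx*dy*dz) = dx*dy*dz, i.e. one full cell volume per node;
! along a domain edge only two of the four cells are fluid, giving
! half a cell volume, and at a corner only one is fluid, giving one
! fourth, consistent with the header comment above.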
         des_vol_node(ijk) = avg_factor*(vol_ijk + vol_ipjk + &
            vol_ijpk + vol_ipjpk)

         IF (DO_K) THEN
            KP = kmap_c(kraw+1)
            ijkp   = funijk(I, J, KP)
            ipjkp  = funijk(IP,J, KP)
            ijpkp  = funijk(I, JP,KP)
            ipjpkp = funijk(IP,JP,KP)

            vol_ijkp   = dx(I) *dy(J) *dz(KP)
            vol_ipjkp  = dx(IP)*dy(J) *dz(KP)
            vol_ijpkp  = dx(I) *dy(JP)*dz(KP)
            vol_ipjpkp = dx(IP)*dy(JP)*dz(KP)

            vol_node_uncorr = vol_node_uncorr + avg_factor* &
               (vol_ijkp + vol_ipjkp + vol_ijpkp + vol_ipjpkp)

! Discard the contribution of any non-fluid cell in the upper k-plane.
            IF(.NOT.FLUID_AT(ijkp)) THEN
               vol_node_actual_count = vol_node_actual_count - 1
               vol_ijkp = zero
            ENDIF

            IF(.NOT.FLUID_AT(ipjkp)) THEN
               vol_node_actual_count = vol_node_actual_count - 1
               vol_ipjkp = zero
            ENDIF

            IF(.NOT.FLUID_AT(ijpkp)) THEN
               vol_node_actual_count = vol_node_actual_count - 1
               vol_ijpkp = zero
            ENDIF

            IF(.NOT.FLUID_AT(ipjpkp)) THEN
               vol_node_actual_count = vol_node_actual_count - 1
               vol_ipjpkp = zero
            ENDIF

            des_vol_node(ijk) = des_vol_node(ijk) + avg_factor* &
               (vol_ijkp + vol_ipjkp + vol_ijpkp + vol_ipjpkp)

         ENDIF

      ENDDO   ! end do (ijk = ijkstart3, ijkend3)

      RETURN
      END SUBROUTINE compute_volume_of_nodes