module edynamo_driver
  use shr_kind_mod,only: r8 => shr_kind_r8 ! 8-byte reals
  use params      ,only: set_cons,iulog
  use geogrid     ,only: nlon,nlat,jspole,jnpole
  use maggrid     ,only: set_maggrid,nmlonp1,nmlat
  use getapex     ,only: get_apex ! get mag apex coordinates
  use my_mpi      ,only: mytid,mp_init,mp_decomp,mp_close,lev0,lev1,lon0,lon1,&
                         lat0,lat1,mlon0,mlon1,mlat0,mlat1
  use namelist    ,only: get_namelist,model_name,input_ncfile,output_ncfile
  use heelis      ,only: heelis_model
  use edynamo     ,only: dynamo,ed1,ed2
  use output      ,only: output_init,write_output
  use read_ncfile ,only: &
    get_geogrid,read_waccm,read_tgcm, & ! subroutines
    ntime,iday,year,ut,               & ! time and date
    tn,un,vn,omega,zpot,barm,ped,hall   ! 3d input fields
  use timing      ,only: report_timing,time_total
  use addfld_mod  ,only: addfld ! routine to add field to output

  implicit none
! mpif.h provides mpi_wtime, MPI_REAL8, MPI_COMM_WORLD, etc.
#include <mpif.h>
  real(r8) :: & ! for local timing
    begtotal,endtotal,begstep,endstep

contains
!-----------------------------------------------------------------------
  subroutine edyn_init
!
! Init edynamo: this should be called once per run, before edyn_driver.
!
    call set_cons            ! set runtime constants
    call mp_init             ! initialize mpi library
    begtotal = mpi_wtime()   ! begin wallclock timing
    call get_namelist(mytid) ! get namelist input from user
!
! Read dimensions and coordinate vars from input file,
! and set global geographic grid:
!
    call get_geogrid(input_ncfile,model_name)
!
! Set parameter-based geomagnetic grid:
!
    call set_maggrid
!
! Decompose geographic and magnetic grids:
!
    call mp_decomp(trim(model_name)) ! set up domain decomposition
!
! Initialize output file:
! (must be called once per run, before write_output)
!
    call output_init

  end subroutine edyn_init
!-----------------------------------------------------------------------
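! The driver below runs in one of two coupling modes, selected by its
! optional arguments. A sketch of the two call forms (the caller-side
! arrays are assumptions; the argument names are the actual dummy names):
!
!   call edyn_driver(ed1_glb,ed2_glb)   ! no conductance inputs:
!                                       ! edynamo does its own fieldline
!                                       ! integrations (do_integrals=.true.)
!
!   call edyn_driver(ed1_glb,ed2_glb, & ! all six conductance inputs
!     zigm11_glbin,zigm22_glbin,zigm2_glbin,zigmc_glbin, & ! from TIME3D
!     rim1_glbin,rim2_glbin)            ! (do_integrals=.false.)
!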
  subroutine edyn_driver(ed1_glb,ed2_glb,zigm11_glbin,zigm22_glbin,&
    zigm2_glbin,zigmc_glbin,rim1_glbin,rim2_glbin)
!
! Args:
    real(r8),dimension(nmlat,nmlonp1),intent(out) :: ed1_glb,ed2_glb
    real(r8),dimension(nmlat,nmlonp1),optional,intent(in) :: &
      zigm11_glbin,zigm22_glbin,zigm2_glbin,zigmc_glbin, &
      rim1_glbin,rim2_glbin
!
! Local:
    integer :: itime,itime0,itime1
    real(r8) :: secs
    real(r8) :: steptime=0._r8 ! wallclock time per step
    integer,save :: ncalls=1
    logical :: do_integrals
!
! If the integrated conductances are passed in from TIME3D, edynamo
! skips the fieldline integrations and passes these global inputs to
! the solver. When coupling with TIME3D, they come in as (nmlat,nmlonp1),
! so they must be transposed to (nmlonp1,nmlat) for the solver
! (see sub transform_glbin below).
!
    do_integrals = .true.
    if (present(zigm11_glbin).and.present(zigm22_glbin).and.&
        present(zigm2_glbin) .and.present(zigmc_glbin) .and.&
        present(rim1_glbin)  .and.present(rim2_glbin)) then
      do_integrals = .false.
      call transform_glbin(zigm11_glbin,zigm22_glbin,&
        zigm2_glbin,zigmc_glbin,rim1_glbin,rim2_glbin,nmlat,nmlonp1)
    endif
!
! For now, increment itime for reading waccm or timegcm input
! histories at every call by time3d, but keep itime <= ntime
! (e.g., with ntime=24, successive calls read itime = 1,2,...,24,1,2,...):
!
    itime0 = mod(ncalls-1,ntime)+1
    itime1 = itime0

    do itime=itime0,itime1
      if (ncalls==1) begstep = mpi_wtime() ! start step-timing
!
! Get edynamo inputs from neutral atmosphere model:
!
      if (trim(model_name) == 'TIMEGCM') then
        call read_tgcm(input_ncfile,itime)
      elseif (trim(model_name) == 'WACCM') then
        call read_waccm(input_ncfile,itime)
      endif
!
! Get apex coordinates and related parameters for current year:
!
      if (ncalls==1) call get_apex(year,nlon,nlat,jspole,jnpole)
!
! Get high-latitude potential from Heelis empirical model:
! (sub heelis_model is in heelis.F90)
!
      secs = ut*3600._r8 ! ut in seconds
      call heelis_model(iday,secs)
!
! Call main edynamo driver:
! (sub dynamo is in edynamo.F90)
!
      call dynamo(tn,un,vn,omega,zpot,barm,ped,hall, &
        lev0,lev1,lon0,lon1,lat0,lat1,do_integrals)
!
! Gather output to global arrays for time3d. Output is
! ed1_glb,ed2_glb(nmlat,nmlonp1):
!
      call edyn_gather(ed1,ed2,ed1_glb,ed2_glb,mlon0,mlon1,mlat0,mlat1, &
        nmlonp1,nmlat)
!
! Write to netcdf output file (sub write_output is in output.F90).
!
! If itime is the last arg, output cycles through the histories on the
! model input file, overwriting the output file in each cycle, so the
! output file ends up with the same number of histories as the input file.
!
! If ncalls is the last arg, output still cycles through the histories
! on the input file, but writes output for *every* call, giving as many
! output histories as there are time3d iterations (cycling redundantly
! through the times on the input file).
!
      call write_output(output_ncfile,itime)
!     call write_output(output_ncfile,ncalls)

      endstep = mpi_wtime() ! end step-timing
      steptime = endstep-begstep
      write(iulog,"('Edynamo: ncalls=',i4,' itime=',i4,' day=',i4,' ut=',f6.2)") &
        ncalls,itime,iday,ut
    enddo ! itime=itime0,itime1

    endtotal = mpi_wtime() ! end wallclock timing
    time_total = endtotal-begtotal
!   call report_timing ! report timing to stdout
!   call mp_close      ! finalize MPI

    ncalls = ncalls+1
  end subroutine edyn_driver
!-----------------------------------------------------------------------
  subroutine edyn_gather(ed1,ed2,ed1_glb,ed2_glb,mlon0,mlon1,mlat0,mlat1, &
    nmlonp1,nmlat)
!
! Gather output for time3d to global arrays on all processors.
! Later, if time3d is executed by the root task only, this could
! be changed to gather only to the root task.
!
    use my_mpi,only: handle_mpi_err,ntask,tasks,mxmaglon,mxmaglat
#include <mpif.h>
!
! Args:
    integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nmlonp1,nmlat
    real(r8),dimension(mlon0-1:mlon1+1,mlat0-1:mlat1+1),intent(in) :: ed1,ed2
    real(r8),dimension(nmlat,nmlonp1),intent(out) :: ed1_glb,ed2_glb
!
! Local:
    integer,parameter :: nf=2 ! number of fields gathered (ed1,ed2)
    integer :: i,j,n,len,ier,nlons,nlats,&
      ilon0,ilon1,ilat0,ilat1
    real(r8) :: sendbuf(mxmaglon,mxmaglat,nf)
    real(r8) :: recvbuf(mxmaglon,mxmaglat,nf,0:ntask-1)

    ed1_glb = 0._r8 ; ed2_glb = 0._r8
    sendbuf = 0._r8 ; recvbuf = 0._r8
    len   = mxmaglon*mxmaglat*nf
    nlons = mlon1-mlon0+1
    nlats = mlat1-mlat0+1
!
! Load this task's subdomain (excluding halo points) into the send buffer:
!
    sendbuf(1:nlons,1:nlats,1) = ed1(mlon0:mlon1,mlat0:mlat1)
    sendbuf(1:nlons,1:nlats,2) = ed2(mlon0:mlon1,mlat0:mlat1)

!   write(6,"('edyn_gather call mpi_allgather: sendbuf(..,1)=',2e12.4,' sendbuf(..,2)=',2e12.4)") &
!     minval(sendbuf(1:nlons,1:nlats,1)),maxval(sendbuf(1:nlons,1:nlats,1)),&
!     minval(sendbuf(1:nlons,1:nlats,2)),maxval(sendbuf(1:nlons,1:nlats,2))

    call mpi_allgather(sendbuf,len,MPI_REAL8,&
      recvbuf,len,MPI_REAL8,MPI_COMM_WORLD,ier)
    if (ier /= 0) &
      call handle_mpi_err(ier,'edyn_gather: error from mpi_allgather')
!
! Unpack each task's subdomain, transposing from local (i,j) order
! to the (j,i) order of the global output arrays:
!
    do n=0,ntask-1
      ilon0 = tasks(n)%mlon0
      ilon1 = tasks(n)%mlon1
      ilat0 = tasks(n)%mlat0
      ilat1 = tasks(n)%mlat1
      nlons = ilon1-ilon0+1
      nlats = ilat1-ilat0+1
      do i=ilon0,ilon1
        do j=ilat0,ilat1
          ed1_glb(j,i) = recvbuf(i-ilon0+1,j-ilat0+1,1,n)
          ed2_glb(j,i) = recvbuf(i-ilon0+1,j-ilat0+1,2,n)
        enddo
      enddo
    enddo

!   write(6,"('edyn_gather: ed1=',2e12.4,' ed2=',2e12.4,' ed1_glb=',2e12.4,' ed2_glb=',2e12.4)") &
!     minval(ed1(mlon0:mlon1,mlat0:mlat1)),maxval(ed1(mlon0:mlon1,mlat0:mlat1)), &
!     minval(ed2(mlon0:mlon1,mlat0:mlat1)),maxval(ed2(mlon0:mlon1,mlat0:mlat1)), &
!     minval(ed1_glb),maxval(ed1_glb),minval(ed2_glb),maxval(ed2_glb)

    if (mytid==0) then
      call addfld('ED1_GLB',' ',' ',ed1_glb,'mlat',1,nmlat,'mlon',1,nmlonp1,0)
      call addfld('ED2_GLB',' ',' ',ed2_glb,'mlat',1,nmlat,'mlon',1,nmlonp1,0)
    endif

  end subroutine edyn_gather
!-----------------------------------------------------------------------
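! Sketch of the root-only gather mentioned in edyn_gather above (an
! assumption, not active code): if time3d were executed by the root
! task only, the mpi_allgather could be replaced by a gather to task 0:
!
!   call mpi_gather(sendbuf,len,MPI_REAL8, &
!                   recvbuf,len,MPI_REAL8,0,MPI_COMM_WORLD,ier)
!
! after which only mytid==0 would unpack recvbuf into ed1_glb,ed2_glb.
!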
  subroutine transform_glbin(zigm11_glbin,zigm22_glbin,zigm2_glbin,&
    zigmc_glbin,rim1_glbin,rim2_glbin,nmlat,nmlonp1)
    use my_mpi ,only: mp_scatter_2d ! scatter solution to slave tasks
    use my_mpi ,only: mlon0,mlon1,mlat0,mlat1,mlev0,mlev1,mytid
    use edynamo,only: & ! subdomain arrays (mlon0:mlon1,mlat0:mlat1)
      alloc_edyn, & ! allocation subroutine
      zigm11,     & ! sigma11*cos(theta0)
      zigmc,      & ! sigmac
      zigm2,      & ! sigma2
      zigm22,     & ! sigma22/cos(theta0)
      rim1,rim2     ! see description in edynamo.F90
!
! Args:
    integer,intent(in) :: nmlat,nmlonp1
    real(r8),dimension(nmlat,nmlonp1),intent(in) :: & ! inputs from time3d
      zigm11_glbin , &
      zigm22_glbin , &
      zigm2_glbin  , &
      zigmc_glbin  , &
      rim1_glbin,rim2_glbin
!
! Local:
    real(r8),dimension(nmlonp1,nmlat) :: & ! transposed copies of inputs
      zigm11_glbinx , &
      zigm22_glbinx , &
      zigm2_glbinx  , &
      zigmc_glbinx  , &
      rim1_glbinx,rim2_glbinx
    real(r8),dimension(nmlonp1,nmlat,6) :: fglb ! global zigmxx,rim1,2
    real(r8),dimension(mlon0:mlon1,mlat0:mlat1,6) :: fsub ! subdomain zigmxx,rim1,2
    logical,save :: first=.true.
    integer :: i,j

    if (first) then ! first call: allocate edynamo dynamic arrays
      call alloc_edyn
      first = .false.
    endif
!
! Transpose time3d inputs from (nmlat,nmlonp1) to (nmlonp1,nmlat):
!
    do j=1,nmlat
      do i=1,nmlonp1
        zigm11_glbinx(i,j) = zigm11_glbin(j,i)
        zigm22_glbinx(i,j) = zigm22_glbin(j,i)
        zigmc_glbinx (i,j) = zigmc_glbin (j,i)
        zigm2_glbinx (i,j) = zigm2_glbin (j,i)
        rim1_glbinx  (i,j) = rim1_glbin  (j,i)
        rim2_glbinx  (i,j) = rim2_glbin  (j,i)
      enddo
    enddo
!
! Scatter time3d zigmxx and rim1,2 back to subdomains so complete_integrals
! can be called from edynamo:
!
    fglb(:,:,1) = zigm11_glbinx(:,:)
    fglb(:,:,2) = zigm22_glbinx(:,:)
    fglb(:,:,3) = zigm2_glbinx (:,:)
    fglb(:,:,4) = zigmc_glbinx (:,:)
    fglb(:,:,5) = rim1_glbinx  (:,:)
    fglb(:,:,6) = rim2_glbinx  (:,:)

    call mp_scatter_2d(fglb,fsub,6) ! fsub is output

    zigm11(mlon0:mlon1,mlat0:mlat1) = fsub(:,:,1)
    zigm22(mlon0:mlon1,mlat0:mlat1) = fsub(:,:,2)
    zigm2 (mlon0:mlon1,mlat0:mlat1) = fsub(:,:,3)
    zigmc (mlon0:mlon1,mlat0:mlat1) = fsub(:,:,4)
    rim1  (mlon0:mlon1,mlat0:mlat1) = fsub(:,:,5)
    rim2  (mlon0:mlon1,mlat0:mlat1) = fsub(:,:,6)

    call addfld('ZIGM11_GLB','ZIGM11_GLB (time3d)',' ',zigm11_glbinx,'mlon',1,nmlonp1,'mlat',1,nmlat,0)
    call addfld('ZIGM22_GLB','ZIGM22_GLB (time3d)',' ',zigm22_glbinx,'mlon',1,nmlonp1,'mlat',1,nmlat,0)
    call addfld('ZIGMC_GLB' ,'ZIGMC_GLB (time3d)' ,' ',zigmc_glbinx ,'mlon',1,nmlonp1,'mlat',1,nmlat,0)
    call addfld('ZIGM2_GLB' ,'ZIGM2_GLB (time3d)' ,' ',zigm2_glbinx ,'mlon',1,nmlonp1,'mlat',1,nmlat,0)
    call addfld('RIM1_GLB'  ,'RIM1_GLB (time3d)'  ,' ',rim1_glbinx  ,'mlon',1,nmlonp1,'mlat',1,nmlat,0)
    call addfld('RIM2_GLB'  ,'RIM2_GLB (time3d)'  ,' ',rim2_glbinx  ,'mlon',1,nmlonp1,'mlat',1,nmlat,0)

  end subroutine transform_glbin
!-----------------------------------------------------------------------
end module edynamo_driver
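!-----------------------------------------------------------------------
! Minimal usage sketch (commented out; the production caller is time3d,
! which is not part of this file, so this example program is an
! assumption about a standalone driver):
!
!   program edynamo_example
!     use shr_kind_mod   ,only: r8 => shr_kind_r8
!     use maggrid        ,only: nmlat,nmlonp1
!     use edynamo_driver ,only: edyn_init,edyn_driver
!     implicit none
!     real(r8) :: ed1_glb(nmlat,nmlonp1),ed2_glb(nmlat,nmlonp1)
!     call edyn_init                    ! once per run, before edyn_driver
!     call edyn_driver(ed1_glb,ed2_glb) ! once per iteration; without the
!                                       ! optional args, edynamo performs
!                                       ! its own fieldline integrations
!   end program edynamo_example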