!
! Copyright (C) 2013, Northwestern University
!
! Please email questions to Wei-keng Liao
!
! This example program illustrates the interoperability of PnetCDF and netCDF.
! It uses netCDF-4 APIs to read 2D arrays in parallel from a shared file in
! the classic netCDF format. The global 2D array is partitioned among
! processes in a block-block fashion along both X and Y dimensions. To enable
! PnetCDF to carry out parallel I/O underneath, add the NF_PNETCDF option to
! the open mode when opening the file. Compile and run commands are given
! below, along with the result of running the command "ncdump" on the input
! file.
!
! 1. At the time this program was developed, the following libraries were
!    used:
!    HDF5 version 1.8.10
!    netCDF-C version 4.4.0
!    netCDF-Fortran version 4.4.2
!    PnetCDF version 1.4.0
!
! 2. To build netCDF, the following configure options were used:
!    ./configure --prefix=/usr/local \
!                --disable-shared \
!                --enable-netcdf-4 \
!                --enable-pnetcdf \
!                FC=mpif90 CXX=mpicxx CC=mpicc
!
!    To build the netCDF Fortran library, the configure command used:
!    ./configure --disable-shared \
!                FC=mpif90 CXX=mpicxx CC=mpicc \
!                FCFLAGS=-g FFLAGS=-g \
!                CPPFLAGS="-DgFortran -I/NetCDF/include -I/PnetCDF/include -I/HDF5/include" \
!                LDFLAGS="-L/NetCDF/lib -L/PnetCDF/lib -L/HDF5/lib" \
!                LIBS="-lnetcdf -lhdf5_hl -lhdf5 -lpnetcdf -lz -lcurl -ldl -lm"
!
! 3. To compile and link this example program:
!    mpif77 -g -o nc4_pnc_get_vara nc4_pnc_get_vara.f \
!           -I/path/to/all/includes \
!           -L/path/to/all/libraries -lnetcdff -lnetcdf \
!           -lhdf5_hl -lhdf5 -lpnetcdf -lz -lcurl -ldl -lm
!
! 4. Input file:
!    Run this program on the output file generated by nc4_pnc_put.f, which is
!    in CDF-1 format. The output of running the command "ncdump" on that file
!    is shown below.
!    % ncdump testfile.nc
!    netcdf testfile {
!    // file format: CDF-1
!    dimensions:
!            Y = 10 ;
!            X = 10 ;
!    variables:
!            int var(Y, X) ;
!    data:
!
!     var =
!      0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
!      0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
!      0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
!      0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
!      0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
!      2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
!      2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
!      2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
!      2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
!      2, 2, 2, 2, 2, 3, 3, 3, 3, 3 ;
!    }
!
! 5. Run command (on 4 MPI processes):
!    mpiexec -machinefile hostfile -n 4 nc4_pnc_get_vara testfile.nc
!

      subroutine check(err, message)
          implicit none
          include 'mpif.h'
          include 'netcdf.inc'
          integer err
          character(len=*) message

          ! It is a good idea to check the returned value for possible errors
          if (err .NE. NF_NOERR) then
              write(6,*) trim(message), trim(nf_strerror(err))
              call MPI_Abort(MPI_COMM_WORLD, -1, err)
          end if
      end subroutine check

      program main
          implicit none
          include 'mpif.h'
          include 'netcdf.inc'

          character(LEN=128) filename, cmd
          integer argc, IARGC, err, nprocs, rank, format
          integer omode, ncid, varid, dimid(2)
          integer NX, psizes(2), gsizes(2), start(2), count(2)
          PARAMETER(NX=5)
          integer buf(NX, NX)

          call MPI_Init(err)
          call MPI_Comm_rank(MPI_COMM_WORLD, rank, err)
          call MPI_Comm_size(MPI_COMM_WORLD, nprocs, err)

          ! take the filename from the command-line argument, if any
          call getarg(0, cmd)
          argc = IARGC()
          if (argc .NE. 1) then
              print*,'Usage: ',trim(cmd),' filename'
              goto 999
          endif
          call getarg(1, filename)

          ! indicate to use PnetCDF or HDF5 to carry out parallel I/O.
          ! Note that using NF_MPIIO is no longer required, as it has been
          ! deprecated since netCDF 4.6.2.
          omode = NF_NOWRITE
          omode = IOR(omode, NF_MPIIO)

          err = nf_open_par(filename, omode, MPI_COMM_WORLD,
     +                      MPI_INFO_NULL, ncid)
          call check(err, 'In nf_open_par: ')

          ! inquire dimension IDs and lengths
          err = nf_inq_dimid(ncid, "Y", dimid(2))
          call check(err, 'In nf_inq_dimid Y: ')
          err = nf_inq_dimid(ncid, "X", dimid(1))
          call check(err, 'In nf_inq_dimid X: ')

          err = nf_inq_dimlen(ncid, dimid(2), gsizes(2))
          call check(err, 'In nf_inq_dimlen Y: ')
          err = nf_inq_dimlen(ncid, dimid(1), gsizes(1))
          call check(err, 'In nf_inq_dimlen X: ')

          ! obtain variable ID
          err = nf_inq_varid(ncid, "var", varid)
          call check(err, 'In nf_inq_varid: ')

          ! create a block-block 2D data partitioning pattern
          psizes = 0
          call MPI_Dims_create(nprocs, 2, psizes, err)
          if (gsizes(1) .NE. NX * psizes(1)) then
              print*, 'array size mismatch along X'
              goto 999
          endif
          if (gsizes(2) .NE. NX * psizes(2)) then
              print*, 'array size mismatch along Y'
              goto 999
          endif

          ! check the file format
          err = nf_inq_format(ncid, format)
          call check(err, 'In nf_inq_format: ')

          ! set to use MPI/PnetCDF collective I/O
          if (format .EQ. NF_FORMAT_NETCDF4 .OR.
     +        format .EQ. NF_FORMAT_NETCDF4_CLASSIC) then
              err = nf_var_par_access(ncid, varid, NF_COLLECTIVE)
              call check(err, 'In nf_var_par_access: ')
          else
              err = nf_var_par_access(ncid, NF_GLOBAL, NF_COLLECTIVE)
              call check(err, 'In nf_var_par_access: ')
          endif

          ! Note that in Fortran, array indices start with 1
          start(1) = NX * MOD(rank, psizes(1)) + 1
          start(2) = NX * (rank / psizes(1)) + 1
          count(1) = NX
          count(2) = NX

          ! read this process's block of the variable collectively
          err = nf_get_vara_int(ncid, varid, start, count, buf)
          call check(err, 'In nf_get_vara_int: ')

          ! close the file
          err = nf_close(ncid)
          call check(err, 'In nf_close: ')

 999      call MPI_Finalize(err)
      end program main
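!
! An optional sanity check: assuming testfile.nc was produced by
! nc4_pnc_put.f using the same number of MPI processes, each process
! should read back a block in which every element equals its own rank.
! A loop like the following could be placed right after the
! nf_get_vara_int call (i and j would need to be declared as integers
! in the main program):
!
!          do j=1, NX
!             do i=1, NX
!                if (buf(i,j) .NE. rank) then
!                   print*, 'rank ',rank,': unexpected value ',
!     +                     buf(i,j), ' at (',i,',',j,')'
!                endif
!             enddo
!          enddo
!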