/*
 * Copyright (C) 2013, Northwestern University
 *
 * Please email questions to Wei-keng Liao
 *
 * This example program illustrates the interoperability of PnetCDF and
 * netCDF. It uses netCDF-4 APIs to write 2D arrays in parallel to a shared
 * file in the classical netCDF format. The global 2D array is partitioned
 * among processes in a block-block fashion along both X and Y dimensions.
 * To enable PnetCDF to carry out parallel I/O underneath, add the NC_PNETCDF
 * option to the file open mode when creating the file.
 *
 * Compile and run commands are given below, along with the output of running
 * the command "ncdump" on the output file.
 *
 * 1. At the time this program was developed, the following library versions
 *    were used.
 *        HDF5    version 1.8.10
 *        netCDF  version 4.4.0
 *        PnetCDF version 1.4.0
 *
 * 2. To build and install netCDF, the following configure options were used.
 *        ./configure --prefix=/path/netCDF \
 *                    --enable-pnetcdf \
 *                    CC=mpicc \
 *                    CPPFLAGS="-I/path/PnetCDF/include -I/path/HDF5/include" \
 *                    LDFLAGS="-L/path/PnetCDF/lib -L/path/HDF5/lib"
 *        make install
 *
 * 3. To compile and link this example program:
 *        mpicc -O2 -o nc4_pnc_put nc4_pnc_put.c \
 *              -I/path/PnetCDF/include -I/path/netCDF/include -I/path/HDF5/include \
 *              -L/path/PnetCDF/lib -L/path/netCDF/lib -L/path/HDF5/lib \
 *              -lnetcdf -lhdf5_hl -lhdf5 -lpnetcdf -lz -lcurl -ldl -lm
 *
 * 4. Run command (on 4 MPI processes):
 *        mpiexec -machinefile hostfile -n 4 ./nc4_pnc_put testfile.nc
 *
 * 5. Results: an output file in CDF-1 format will be created. The output of
 *    running the command "ncdump" on the output file is given below.
 *
 *    % /path/netCDF/bin/ncdump testfile.nc
 *    netcdf testfile {
 *    dimensions:
 *            Y = 10 ;
 *            X = 10 ;
 *    variables:
 *            int var(Y, X) ;
 *    data:
 *
 *     var =
 *      10000, 10001, 10002, 10003, 10004, 10100, 10101, 10102, 10103, 10104,
 *      10005, 10006, 10007, 10008, 10009, 10105, 10106, 10107, 10108, 10109,
 *      10010, 10011, 10012, 10013, 10014, 10110, 10111, 10112, 10113, 10114,
 *      10015, 10016, 10017, 10018, 10019, 10115, 10116, 10117, 10118, 10119,
 *      10020, 10021, 10022, 10023, 10024, 10120, 10121, 10122, 10123, 10124,
 *      10200, 10201, 10202, 10203, 10204, 10300, 10301, 10302, 10303, 10304,
 *      10205, 10206, 10207, 10208, 10209, 10305, 10306, 10307, 10308, 10309,
 *      10210, 10211, 10212, 10213, 10214, 10310, 10311, 10312, 10313, 10314,
 *      10215, 10216, 10217, 10218, 10219, 10315, 10316, 10317, 10318, 10319,
 *      10220, 10221, 10222, 10223, 10224, 10320, 10321, 10322, 10323, 10324 ;
 *    }
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include <netcdf.h>
#include <netcdf_par.h>

#define FATAL_ERR {if(err!=NC_NOERR) {printf("Error at line=%d: %s Aborting ...\n", __LINE__, nc_strerror(err)); goto fn_exit;}}
#define ERR {if(err!=NC_NOERR) printf("Error at line=%d: %s\n", __LINE__, nc_strerror(err));}

#define NVARS 4
#define NX    5

int main(int argc, char* argv[])
{
    int i, rank, nprocs, err, ncid, cmode, varid, mpi_namelen, verbose=1;
    int dimid[2], psizes[2], *buf;
    size_t start[2], count[2], gsizes[2];
    MPI_Info info=MPI_INFO_NULL;
    char mpi_name[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Get_processor_name(mpi_name, &mpi_namelen);

    if (argc != 2) {
        if (!rank) printf("Usage: %s filename\n", argv[0]);
        goto fn_exit;
    }

    if (verbose) printf("rank %2d runs on host %s\n", rank, mpi_name);

    /* create file and indicate the parallel I/O method:
     * use PnetCDF : no additional flag is needed
     * use HDF5    : add flag NC_NETCDF4, i.e.
     *               cmode |= NC_NETCDF4;
     * Using NC_MPIIO is no longer required, as it has been deprecated since
     * NetCDF 4.6.2 */
    cmode = NC_CLOBBER;
    err = nc_create_par(argv[1], cmode, MPI_COMM_WORLD, info, &ncid); FATAL_ERR

    /* free info object */
    if (info != MPI_INFO_NULL) MPI_Info_free(&info);

    /* create a block-block 2D data partitioning pattern */
    psizes[0] = psizes[1] = 0;
    MPI_Dims_create(nprocs, 2, psizes);
    gsizes[0] = NX * psizes[0];
    gsizes[1] = NX * psizes[1];

    /* define dimensions */
    err = nc_def_dim(ncid, "Y", gsizes[0], &dimid[0]); ERR
    err = nc_def_dim(ncid, "X", gsizes[1], &dimid[1]); ERR

    /* define a 2D variable of integer type */
    err = nc_def_var(ncid, "var", NC_INT, 2, dimid, &varid); ERR

    /* exit define mode */
    err = nc_enddef(ncid); ERR

    /* set the access method to use MPI collective I/O */
    if (cmode & NC_NETCDF4) {
        err = nc_var_par_access(ncid, varid, NC_COLLECTIVE); ERR
    }
    else {
        err = nc_var_par_access(ncid, NC_GLOBAL, NC_COLLECTIVE); ERR
    }

    /* initialize the local 2D subarray */
    buf = (int*) malloc(NX * NX * sizeof(int));
    for (i=0; i<NX*NX; i++) buf[i] = rank*100 + i + 10000;

    /* set this rank's start offsets and lengths in the global array:
     * processes are laid out in row-major order on the psizes[0] x psizes[1]
     * grid, and each owns an NX x NX block */
    start[0] = NX * (rank / psizes[1]);
    start[1] = NX * (rank % psizes[1]);
    count[0] = NX;
    count[1] = NX;

    /* write the local subarray to the variable collectively */
    err = nc_put_vara_int(ncid, varid, start, count, buf); ERR

    free(buf);

    /* close the file */
    err = nc_close(ncid); ERR

fn_exit:
    MPI_Finalize();
    return 0;
}