tLocalInterpCtx.cc - pism - [fork] customized build of PISM, the parallel ice sheet model (tillflux branch)
git clone git://src.adamsgaard.dk/pism
---
tLocalInterpCtx.cc (5584B)
---
// Copyright (C) 2007-2019 Jed Brown, Ed Bueler and Constantine Khroulev
//
// This file is part of PISM.
//
// PISM is free software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the Free Software
// Foundation; either version 3 of the License, or (at your option) any later
// version.
//
// PISM is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
// details.
//
// You should have received a copy of the GNU General Public License
// along with PISM; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

#include <cstring>
#include <cstdlib>
#include <algorithm> // std::min, std::max
#include <gsl/gsl_interp.h>

#include "File.hh"
#include "pism/util/pism_utilities.hh"
#include "LocalInterpCtx.hh"
#include "pism/util/ConfigInterface.hh"
#include "pism/util/IceGrid.hh"

#include "pism/util/interpolation.hh"
#include "pism/util/error_handling.hh"
#include "pism/util/Logger.hh"

namespace pism {

//! Compute start and count for getting a subset of x.
/*! Given a grid `x`, we find `x_start` and `x_count` so that `[subset_x_min, subset_x_max]` is
 * contained in `[x[x_start], x[x_start + x_count - 1]]`.
 *
 * `x_start` and `x_count` define the smallest subset of `x` with this property.
 *
 * Note that `x_start + x_count <= x.size()` as long as `x` is strictly increasing.
 *
 * @param[in] x input grid (defines the interpolation domain)
 * @param[in] subset_x_min minimum `x` of a subset (interpolation range)
 * @param[in] subset_x_max maximum `x` of a subset (interpolation range)
 * @param[out] x_start starting index
 * @param[out] x_count number of elements required
 */
static void subset_start_and_count(const std::vector<double> &x,
                                   double subset_x_min, double subset_x_max,
                                   unsigned int &x_start, unsigned int &x_count) {
  unsigned int x_size = x.size();

  x_start = gsl_interp_bsearch(&x[0], subset_x_min, 0, x_size - 1);

  unsigned int x_end = gsl_interp_bsearch(&x[0], subset_x_max, 0, x_size - 1) + 1;

  x_end = std::min(x_size - 1, x_end);

  x_count = x_end - x_start + 1;
}
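
// A worked example with illustrative numbers (not from the original source):
// for x = {0, 1, 2, 3, 4} and the subset [1.5, 3.5], gsl_interp_bsearch()
// returns index 1 for subset_x_min and index 3 for subset_x_max, so
// x_start = 1, x_end = min(4, 3 + 1) = 4, and x_count = 4; the selected
// points x[1], ..., x[4] = 1, ..., 4 cover [1.5, 3.5].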

//! Construct a local interpolation context.
/*!
  The essential quantities to compute are where each processor should start within the NetCDF file
  grid (`start[]`) and how many grid points it needs from that starting place (`count[]`). The
  resulting portion of the grid is stored in `buffer` (a field of the `LocalInterpCtx`).

  We make conservative choices about `start[]` and `count[]`. In particular, the portions owned by
  processors \e must overlap at one point in the NetCDF file grid, but they \e may overlap more
  than that (as computed here).

  Note that this constructor does not read data from the NetCDF file; the information from the
  file must already be extracted and stored in the `grid_info` structure `input`. The only
  communication it performs is a single reduction used to size the buffer on rank 0.

  The `IceGrid` is used to determine what ranges of the target arrays (i.e. \c Vecs into which
  NetCDF information will be interpolated) are owned by each processor.
 */
LocalInterpCtx::LocalInterpCtx(const grid_info &input, const IceGrid &grid,
                               const std::vector<double> &z_output,
                               InterpolationType type) {
  const int T = 0, X = 1, Y = 2, Z = 3; // indices, just for clarity

  grid.ctx()->log()->message(4, "\nRegridding file grid info:\n");
  input.report(*grid.ctx()->log(), 4, grid.ctx()->unit_system());

  // limits of the processor's part of the target computational domain
  const double
    x_min_proc = grid.x(grid.xs()),
    x_max_proc = grid.x(grid.xs() + grid.xm() - 1),
    y_min_proc = grid.y(grid.ys()),
    y_max_proc = grid.y(grid.ys() + grid.ym() - 1);

  // T
  start[T] = input.t_len - 1; // use the latest time.
  count[T] = 1;               // read only one record

  // X
  subset_start_and_count(input.x, x_min_proc, x_max_proc, start[X], count[X]);

  // Y
  subset_start_and_count(input.y, y_min_proc, y_max_proc, start[Y], count[Y]);

  // Z
  start[Z] = 0;                         // always start at the base
  count[Z] = std::max(input.z_len, 1u); // read at least one level
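
  // Taken together, start[] and count[] now describe the hyperslab this rank
  // will read from the file: one time record, the x and y ranges covering its
  // part of the domain, and the whole column of vertical levels.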

  // We need a buffer for the local data, but rank 0 needs as much storage as
  // the rank with the largest block (which may be anywhere), so we perform a
  // maximum reduction to give rank 0 that size.
  unsigned int buffer_size = count[X] * count[Y] * std::max(count[Z], 1u);
  unsigned int proc0_buffer_size = buffer_size;
  MPI_Reduce(&buffer_size, &proc0_buffer_size, 1, MPI_UNSIGNED, MPI_MAX, 0, grid.com);
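  // Note: MPI_Reduce defines the result only at the root rank, so
  // proc0_buffer_size holds the global maximum on rank 0 only; every other
  // rank keeps the local value it was initialized with.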

  // ParallelSection makes the failure handling collective: an exception on
  // one rank is recorded with failed() and detected in check(), so the other
  // ranks do not hang waiting in a later collective operation.
  ParallelSection allocation(grid.com);
  try {
    if (grid.rank() == 0) {
      buffer.resize(proc0_buffer_size);
    } else {
      buffer.resize(buffer_size);
    }
  } catch (...) {
    allocation.failed();
  }
  allocation.check();

  if (type == LINEAR or type == NEAREST) {
    x.reset(new Interpolation(type, &input.x[start[X]], count[X],
                              &grid.x()[grid.xs()], grid.xm()));

    y.reset(new Interpolation(type, &input.y[start[Y]], count[Y],
                              &grid.y()[grid.ys()], grid.ym()));

    z.reset(new Interpolation(type, input.z, z_output));
  } else {
    throw RuntimeError(PISM_ERROR_LOCATION, "invalid interpolation type in LocalInterpCtx");
  }
}
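
// A sketch of how this context is meant to be used (the exact call sites are
// in PISM's I/O code and are not shown here): the caller reads the hyperslab
// described by start[] and count[] into `buffer`, then uses the `x`, `y`, and
// `z` Interpolation objects to interpolate those values onto the part of the
// target grid owned by this rank.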

} // end of namespace pism