overlappingblockvector.hh
// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
#ifndef EWOMS_OVERLAPPING_BLOCK_VECTOR_HH
#define EWOMS_OVERLAPPING_BLOCK_VECTOR_HH

#include "overlaptypes.hh"

// the listing below uses MpiBuffer throughout; the corresponding include
// (dropped by the documentation extraction) is presumably
#include <ewoms/parallel/mpibuffer.hh>

#include <opm/common/Valgrind.hpp>

#include <dune/istl/bvector.hh>
#include <dune/common/fvector.hh>

#include <memory>
#include <map>
#include <iostream>

namespace Ewoms {
namespace Linear {

/*!
 * \brief An overlap aware block vector.
 */
template <class FieldVector, class Overlap>
class OverlappingBlockVector : public Dune::BlockVector<FieldVector>
{
    typedef Dune::BlockVector<FieldVector> ParentType;
    typedef Dune::BlockVector<FieldVector> BlockVector;

public:
    /*!
     * \brief Given a domestic overlap object, create an overlapping
     *        block vector coherent to it.
     */
    OverlappingBlockVector(const Overlap& overlap)
        : ParentType(overlap.numDomestic()), overlap_(&overlap)
    { createBuffers_(); }

    /*!
     * \brief Copy constructor.
     */
    OverlappingBlockVector(const OverlappingBlockVector& obv)
        : ParentType(obv)
        , numIndicesSendBuff_(obv.numIndicesSendBuff_)
        , indicesSendBuff_(obv.indicesSendBuff_)
        , indicesRecvBuff_(obv.indicesRecvBuff_)
        , valuesSendBuff_(obv.valuesSendBuff_)
        , valuesRecvBuff_(obv.valuesRecvBuff_)
        , overlap_(obv.overlap_)
    {}

    /*!
     * \brief Default constructor.
     */
    OverlappingBlockVector()
    {}

    // make the assignment operators of the base class available
    using ParentType::operator=;

    /*!
     * \brief Assignment operator.
     */
    OverlappingBlockVector& operator=(const OverlappingBlockVector& obv)
    {
        ParentType::operator=(obv);
        numIndicesSendBuff_ = obv.numIndicesSendBuff_;
        indicesSendBuff_ = obv.indicesSendBuff_;
        indicesRecvBuff_ = obv.indicesRecvBuff_;
        valuesSendBuff_ = obv.valuesSendBuff_;
        valuesRecvBuff_ = obv.valuesRecvBuff_;
        overlap_ = obv.overlap_;
        return *this;
    }

    /*!
     * \brief Assign an overlapping block vector from a
     *        non-overlapping one; the entries of border rows are
     *        added up.
     */
    template <class BlockVector>
    void assignAddBorder(const BlockVector& nativeBlockVector)
    {
        size_t numDomestic = overlap_->numDomestic();

        // assign the local rows from the non-overlapping block vector
        for (unsigned domRowIdx = 0; domRowIdx < numDomestic; ++domRowIdx) {
            Index nativeRowIdx = overlap_->domesticToNative(static_cast<Index>(domRowIdx));
            if (nativeRowIdx < 0)
                (*this)[domRowIdx] = 0.0;
            else
                (*this)[domRowIdx] = nativeBlockVector[nativeRowIdx];
        }

        // add up the contents of the border rows; for the remaining
        // rows, get the values from their respective master process
        syncAddBorder();
    }

    /*!
     * \brief Assign an overlapping block vector from a
     *        non-overlapping one; border entries are taken from their
     *        respective master process.
     */
    template <class NativeBlockVector>
    void assign(const NativeBlockVector& nativeBlockVector)
    {
        Index numDomestic = overlap_->numDomestic();

        // assign the local rows from the non-overlapping block vector
        for (Index domRowIdx = 0; domRowIdx < numDomestic; ++domRowIdx) {
            Index nativeRowIdx = overlap_->domesticToNative(domRowIdx);
            if (nativeRowIdx < 0)
                (*this)[static_cast<unsigned>(domRowIdx)] = 0.0;
            else
                (*this)[static_cast<unsigned>(domRowIdx)] =
                    nativeBlockVector[static_cast<unsigned>(nativeRowIdx)];
        }

        // get the values of the remaining rows from their respective
        // master process
        sync();
    }

    /*!
     * \brief Assign the local values to a non-overlapping block
     *        vector.
     */
    template <class NativeBlockVector>
    void assignTo(NativeBlockVector& nativeBlockVector) const
    {
        // assign the local rows
        size_t numNative = overlap_->numNative();
        nativeBlockVector.resize(numNative);
        for (unsigned nativeRowIdx = 0; nativeRowIdx < numNative; ++nativeRowIdx) {
            Index domRowIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeRowIdx));

            if (domRowIdx < 0)
                nativeBlockVector[nativeRowIdx] = 0.0;
            else
                nativeBlockVector[nativeRowIdx] = (*this)[static_cast<unsigned>(domRowIdx)];
        }
    }

    /*!
     * \brief Synchronize all values of the block vector from their
     *        master process.
     */
    void sync()
    {
        typename PeerSet::const_iterator peerIt;
        typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end();

        // send all entries to all peers
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            sendEntries_(peerRank);
        }

        // receive all entries from the peers
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            receiveFromMaster_(peerRank);
        }

        // wait until we have sent everything
        waitSendFinished_();
    }

    /*!
     * \brief Synchronize all values of the block vector by adding up
     *        the values of all peer ranks.
     */
    void syncAdd()
    {
        typename PeerSet::const_iterator peerIt;
        typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end();

        // send all entries to all peers
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            sendEntries_(peerRank);
        }

        // receive all entries from the peers
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            receiveAdd_(peerRank);
        }

        // wait until we have sent everything
        waitSendFinished_();
    }

    /*!
     * \brief Synchronize all values of the block vector from the
     *        master rank, but add up the entries on the border.
     */
    void syncAddBorder()
    {
        typename PeerSet::const_iterator peerIt;
        typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end();

        // send all entries to all peers
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            sendEntries_(peerRank);
        }

        // receive all entries from the peers
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            receiveAddBorder_(peerRank);
        }

        // wait until we have sent everything
        waitSendFinished_();
    }

    //! print the contents of the block vector to standard output (for debugging)
    void print() const
    {
        for (unsigned i = 0; i < this->size(); ++i) {
            std::cout << "row " << i << (overlap_->isLocal(i) ? " " : "*")
                      << ": " << (*this)[i] << "\n" << std::flush;
        }
    }

private:
    void createBuffers_()
    {
#if HAVE_MPI
        // set up the send and receive buffers for each peer process
        typename PeerSet::const_iterator peerIt;
        typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end();

        // send all indices to the peers
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;

            size_t numEntries = overlap_->foreignOverlapSize(peerRank);
            numIndicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<unsigned> >(1);
            indicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<Index> >(numEntries);
            valuesSendBuff_[peerRank] = std::make_shared<MpiBuffer<FieldVector> >(numEntries);

            // fill the indices buffer with global indices
            MpiBuffer<Index>& indicesSendBuff = *indicesSendBuff_[peerRank];
            for (unsigned i = 0; i < numEntries; ++i) {
                Index domRowIdx = overlap_->foreignOverlapOffsetToDomesticIdx(peerRank, i);
                indicesSendBuff[i] = overlap_->domesticToGlobal(domRowIdx);
            }

            // first, send the number of indices
            (*numIndicesSendBuff_[peerRank])[0] = static_cast<unsigned>(numEntries);
            numIndicesSendBuff_[peerRank]->send(peerRank);

            // then, send the indices themselves
            indicesSendBuff.send(peerRank);
        }

        // receive the indices from the peers
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;

            // first, receive the size of the overlap with the peer
            MpiBuffer<unsigned> numRowsRecvBuff(1);
            numRowsRecvBuff.receive(peerRank);
            unsigned numRows = numRowsRecvBuff[0];

            // then, create the MPI buffers
            indicesRecvBuff_[peerRank] = std::make_shared<MpiBuffer<Index> >(numRows);
            valuesRecvBuff_[peerRank] = std::make_shared<MpiBuffer<FieldVector> >(numRows);
            MpiBuffer<Index>& indicesRecvBuff = *indicesRecvBuff_[peerRank];

            // next, receive the actual indices
            indicesRecvBuff.receive(peerRank);

            // finally, translate the global indices to domestic ones
            for (unsigned i = 0; i != numRows; ++i) {
                Index globalRowIdx = indicesRecvBuff[i];
                Index domRowIdx = overlap_->globalToDomestic(globalRowIdx);

                indicesRecvBuff[i] = domRowIdx;
            }
        }

        // wait for all send operations to complete
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            numIndicesSendBuff_[peerRank]->wait();
            indicesSendBuff_[peerRank]->wait();

            // convert the global indices of the send buffer to
            // domestic ones
            MpiBuffer<Index>& indicesSendBuff = *indicesSendBuff_[peerRank];
            for (unsigned i = 0; i < indicesSendBuff.size(); ++i) {
                indicesSendBuff[i] = overlap_->globalToDomestic(indicesSendBuff[i]);
            }
        }
#endif // HAVE_MPI
    }

    void sendEntries_(ProcessRank peerRank)
    {
        // copy the values into the send buffer
        const MpiBuffer<Index>& indices = *indicesSendBuff_[peerRank];
        MpiBuffer<FieldVector>& values = *valuesSendBuff_[peerRank];
        for (unsigned i = 0; i < indices.size(); ++i)
            values[i] = (*this)[static_cast<unsigned>(indices[i])];

        values.send(peerRank);
    }

    void waitSendFinished_()
    {
        typename PeerSet::const_iterator peerIt;
        typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end();

        // wait until the values have been sent to all peers
        peerIt = overlap_->peerSet().begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            valuesSendBuff_[peerRank]->wait();
        }
    }

    void receiveFromMaster_(ProcessRank peerRank)
    {
        const MpiBuffer<Index>& indices = *indicesRecvBuff_[peerRank];
        MpiBuffer<FieldVector>& values = *valuesRecvBuff_[peerRank];

        // receive the values from the peer
        values.receive(peerRank);

        // copy them into the block vector
        for (unsigned j = 0; j < indices.size(); ++j) {
            Index domRowIdx = indices[j];
            if (overlap_->masterRank(domRowIdx) == peerRank) {
                (*this)[static_cast<unsigned>(domRowIdx)] = values[j];
            }
        }
    }

    void receiveAddBorder_(ProcessRank peerRank)
    {
        const MpiBuffer<Index>& indices = *indicesRecvBuff_[peerRank];
        MpiBuffer<FieldVector>& values = *valuesRecvBuff_[peerRank];

        // receive the values from the peer
        values.receive(peerRank);

        // add up the values of rows on the shared border; assign the
        // values of all other received rows
        for (unsigned j = 0; j < indices.size(); ++j) {
            Index domRowIdx = indices[j];
            if (overlap_->isBorderWith(domRowIdx, peerRank))
                (*this)[static_cast<unsigned>(domRowIdx)] += values[j];
            else
                (*this)[static_cast<unsigned>(domRowIdx)] = values[j];
        }
    }

    void receiveAdd_(ProcessRank peerRank)
    {
        const MpiBuffer<Index>& indices = *indicesRecvBuff_[peerRank];
        MpiBuffer<FieldVector>& values = *valuesRecvBuff_[peerRank];

        // receive the values from the peer
        values.receive(peerRank);

        // add up the values of all received rows
        for (unsigned j = 0; j < indices.size(); ++j) {
            Index domRowIdx = indices[j];
            (*this)[static_cast<unsigned>(domRowIdx)] += values[j];
        }
    }

    std::map<ProcessRank, std::shared_ptr<MpiBuffer<unsigned> > > numIndicesSendBuff_;
    std::map<ProcessRank, std::shared_ptr<MpiBuffer<Index> > > indicesSendBuff_;
    std::map<ProcessRank, std::shared_ptr<MpiBuffer<Index> > > indicesRecvBuff_;
    std::map<ProcessRank, std::shared_ptr<MpiBuffer<FieldVector> > > valuesSendBuff_;
    std::map<ProcessRank, std::shared_ptr<MpiBuffer<FieldVector> > > valuesRecvBuff_;

    const Overlap *overlap_;
};

} // namespace Linear
} // namespace Ewoms

#endif
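
A minimal usage sketch (not part of the header above): it assumes an overlap
object of some type "Overlap" that provides the interface used by the class
(numDomestic(), numNative(), peerSet(), the index translation methods, and so
on) and a scalar block vector. The function name scatterGatherExample and the
"apply an operator" step are hypothetical; they only illustrate the intended
call sequence of assign()/sync()/assignTo().

// usage sketch -- illustrative only, under the assumptions stated above
#include "overlappingblockvector.hh"

#include <dune/common/fvector.hh>
#include <dune/istl/bvector.hh>

template <class Overlap>
void scatterGatherExample(const Overlap& overlap,
                          const Dune::BlockVector<Dune::FieldVector<double, 1> >& nativeVec,
                          Dune::BlockVector<Dune::FieldVector<double, 1> >& nativeResult)
{
    typedef Dune::FieldVector<double, 1> Block;
    typedef Ewoms::Linear::OverlappingBlockVector<Block, Overlap> OverlappingVector;

    // create a block vector which is coherent with the overlap object and
    // fill it from the non-overlapping ("native") vector; entries in the
    // overlap are fetched from their respective master process
    OverlappingVector x(overlap);
    x.assign(nativeVec);

    // ... apply an overlapping operator or preconditioner to x here ...

    // make the overlapping entries consistent again, then copy the
    // process-local values back into a non-overlapping vector
    x.sync();
    x.assignTo(nativeResult);
}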