overlappingbcrsmatrix.hh
Go to the documentation of this file.
1 // -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 // vi: set et ts=4 sw=4 sts=4:
3 /*
4  This file is part of the Open Porous Media project (OPM).
5 
6  OPM is free software: you can redistribute it and/or modify
7  it under the terms of the GNU General Public License as published by
8  the Free Software Foundation, either version 2 of the License, or
9  (at your option) any later version.
10 
11  OPM is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with OPM. If not, see <http://www.gnu.org/licenses/>.
18 
19  Consult the COPYING file in the top-level source directory of this
20  module for the precise wording of the license and the list of
21  copyright holders.
22 */
27 #ifndef EWOMS_OVERLAPPING_BCRS_MATRIX_HH
28 #define EWOMS_OVERLAPPING_BCRS_MATRIX_HH
29 
34 
35 #include <opm/common/Valgrind.hpp>
36 
37 #include <dune/istl/scalarproducts.hh>
38 #include <dune/istl/io.hh>
39 
40 #include <algorithm>
41 #include <set>
42 #include <map>
43 #include <iostream>
44 #include <vector>
45 #include <memory>
46 
47 namespace Ewoms {
48 namespace Linear {
49 
53 template <class BCRSMatrix>
54 class OverlappingBCRSMatrix : public BCRSMatrix
55 {
56  typedef BCRSMatrix ParentType;
57 
58 public:
60 
61 private:
62  typedef std::vector<std::set<Index> > Entries;
63 
64 public:
65  typedef typename ParentType::ColIterator ColIterator;
66  typedef typename ParentType::ConstColIterator ConstColIterator;
67  typedef typename ParentType::block_type block_type;
68  typedef typename ParentType::field_type field_type;
69 
70  // no real copying done at the moment
72  : ParentType(other)
73  {}
74 
    /*!
     * \brief Construct an overlapping matrix from a native
     *        (non-overlapping) one.
     *
     * \param nativeMatrix The local, non-overlapping matrix
     * \param borderList   The indices on the border to the peer processes
     * \param blackList    The indices excluded from the parallel linear solver
     * \param overlapSize  The number of algebraic overlap layers to create
     */
    template <class NativeBCRSMatrix>
    OverlappingBCRSMatrix(const NativeBCRSMatrix& nativeMatrix,
                          const BorderList& borderList,
                          const BlackList& blackList,
                          unsigned overlapSize)
    {
        // compute the domestic overlap with all peer processes
        overlap_ = std::make_shared<Overlap>(nativeMatrix, borderList, blackList, overlapSize);
        myRank_ = 0;
#if HAVE_MPI
        MPI_Comm_rank(MPI_COMM_WORLD, &myRank_);
#endif // HAVE_MPI

        // build the overlapping matrix from the non-overlapping
        // matrix and the overlap
        build_(nativeMatrix);
    }
91 
93  {
94  if (overlap_.use_count() == 0)
95  return;
96 
97  // delete all MPI buffers
98  const PeerSet& peerSet = overlap_->peerSet();
99  typename PeerSet::const_iterator peerIt = peerSet.begin();
100  typename PeerSet::const_iterator peerEndIt = peerSet.end();
101  for (; peerIt != peerEndIt; ++peerIt) {
102  ProcessRank peerRank = *peerIt;
103 
104  delete rowSizesRecvBuff_[peerRank];
105  delete rowIndicesRecvBuff_[peerRank];
106  delete entryColIndicesRecvBuff_[peerRank];
107  delete entryValuesRecvBuff_[peerRank];
108 
109  delete numRowsSendBuff_[peerRank];
110  delete rowSizesSendBuff_[peerRank];
111  delete rowIndicesSendBuff_[peerRank];
112  delete entryColIndicesSendBuff_[peerRank];
113  delete entryValuesSendBuff_[peerRank];
114  }
115  }
116 
    //! Returns a mutable reference to this matrix viewed as its
    //! non-overlapping parent (plain BCRSMatrix) type.
    ParentType& asParent()
    { return *this; }

    //! Returns a constant reference to this matrix viewed as its
    //! non-overlapping parent (plain BCRSMatrix) type.
    const ParentType& asParent() const
    { return *this; }
122 
    /*!
     * \brief Returns the domestic overlap for the process.
     */
    const Overlap& overlap() const
    { return *overlap_; }
128 
132  template <class NativeBCRSMatrix>
133  void assignAdd(const NativeBCRSMatrix& nativeMatrix)
134  {
135  // copy the native entries
136  assignFromNative(nativeMatrix);
137 
138  // communicate and add the contents of overlapping rows
139  syncAdd();
140  }
141 
    /*!
     * \brief Assign and synchronize the overlapping matrix from a
     *        non-overlapping one.
     *
     * In contrast to assignAdd(), the values of overlapping rows are
     * copied from the process which owns them instead of being added up.
     */
    template <class NativeBCRSMatrix>
    void assignCopy(const NativeBCRSMatrix& nativeMatrix)
    {
        // copy the native entries
        assignFromNative(nativeMatrix);

        // communicate and copy the contents of overlapping rows
        syncCopy();
    }
157 
161  void resetFront()
162  {
163  // create an identity matrix
164  block_type idMatrix(0.0);
165  for (unsigned i = 0; i < idMatrix.size(); ++i)
166  idMatrix[i][i] = 1.0;
167 
168  int numLocal = overlap_->numLocal();
169  int numDomestic = overlap_->numDomestic();
170  for (int domRowIdx = numLocal; domRowIdx < numDomestic; ++domRowIdx) {
171  if (overlap_->isFront(domRowIdx)) {
172  // set the front rows to a diagonal 1
173  (*this)[domRowIdx] = 0.0;
174  (*this)[domRowIdx][domRowIdx] = idMatrix;
175  }
176  }
177  }
178 
    /*!
     * \brief Print the overlap information and the sparsity pattern of
     *        the matrix to standard output, then dump the full matrix.
     *
     * Rows which are not local to this process are prefixed by "*".
     * Within a row, an entry is printed as "|" for a border column,
     * "X" for a local column and "*" for a column in the overlap.
     */
    void print() const
    {
        overlap_->print();

        for (int i = 0; i < this->N(); ++i) {
            if (overlap_->isLocal(i))
                std::cout << " ";
            else
                std::cout << "*";
            std::cout << "row " << i << " ";

            typedef typename BCRSMatrix::ConstColIterator ColIt;
            ColIt colIt = (*this)[i].begin();
            ColIt colEndIt = (*this)[i].end();
            // scan all potential columns; advance colIt only when the
            // sparse pattern actually contains an entry for column j
            for (int j = 0; j < this->M(); ++j) {
                if (colIt != colEndIt && j == colIt.index()) {
                    ++colIt;
                    if (overlap_->isBorder(j))
                        std::cout << "|";
                    else if (overlap_->isLocal(j))
                        std::cout << "X";
                    else
                        std::cout << "*";
                }
                else
                    std::cout << " ";
            }
            std::cout << "\n" << std::flush;
        }
        Dune::printSparseMatrix(std::cout,
                                *static_cast<const BCRSMatrix *>(this),
                                "M",
                                "row");
    }
213 
    /*!
     * \brief Copy the entries of a native (non-overlapping) matrix into
     *        this overlapping matrix.
     *
     * All entries are zeroed first, so rows which exist only because of
     * the overlap stay zero until syncAdd() or syncCopy() is called.
     */
    template <class NativeBCRSMatrix>
    void assignFromNative(const NativeBCRSMatrix& nativeMatrix)
    {
        // first, set everything to 0,
        BCRSMatrix::operator=(0.0);

        // then copy the domestic entries of the native matrix to the overlapping matrix
        for (unsigned nativeRowIdx = 0; nativeRowIdx < nativeMatrix.N(); ++nativeRowIdx) {
            Index domesticRowIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeRowIdx));
            if (domesticRowIdx < 0) {
                continue; // row corresponds to a black-listed entry
            }

            auto nativeColIt = nativeMatrix[nativeRowIdx].begin();
            const auto& nativeColEndIt = nativeMatrix[nativeRowIdx].end();
            for (; nativeColIt != nativeColEndIt; ++nativeColIt) {
                Index domesticColIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeColIt.index()));

                // make sure to include all off-diagonal entries, even those which belong
                // to DOFs which are managed by a peer process. For this, we have to
                // re-map the column index of the black-listed index to a native one.
                if (domesticColIdx < 0)
                    domesticColIdx = overlap_->blackList().nativeToDomestic(static_cast<Index>(nativeColIt.index()));

                if (domesticColIdx < 0)
                    // there is no domestic index which corresponds to a black-listed
                    // one. this can happen if the grid overlap is larger than the
                    // algebraic one...
                    continue;

                // we need to copy the block matrices manually since it seems that (at
                // least some versions of) Dune have an endless recursion bug when
                // assigning dense matrices of different field type
                const auto& src = *nativeColIt;
                auto& dest = (*this)[static_cast<unsigned>(domesticRowIdx)][static_cast<unsigned>(domesticColIdx)];
                for (unsigned i = 0; i < src.rows; ++i) {
                    for (unsigned j = 0; j < src.cols; ++j) {
                        dest[i][j] = static_cast<field_type>(src[i][j]);
                    }
                }
            }
        }
    }
257 
258  // communicates and adds up the contents of overlapping rows
259  void syncAdd()
260  {
261  // first, send all entries to the peers
262  const PeerSet& peerSet = overlap_->peerSet();
263  typename PeerSet::const_iterator peerIt = peerSet.begin();
264  typename PeerSet::const_iterator peerEndIt = peerSet.end();
265  for (; peerIt != peerEndIt; ++peerIt) {
266  ProcessRank peerRank = *peerIt;
267 
268  sendEntries_(peerRank);
269  }
270 
271  // then, receive entries from the peers
272  peerIt = peerSet.begin();
273  for (; peerIt != peerEndIt; ++peerIt) {
274  ProcessRank peerRank = *peerIt;
275 
276  receiveAddEntries_(peerRank);
277  }
278 
279  // finally, make sure that everything which we send was
280  // received by the peers
281  peerIt = peerSet.begin();
282  for (; peerIt != peerEndIt; ++peerIt) {
283  ProcessRank peerRank = *peerIt;
284  entryValuesSendBuff_[peerRank]->wait();
285  }
286  }
287 
288  // communicates and copies the contents of overlapping rows from
289  // the master
290  void syncCopy()
291  {
292  // first, send all entries to the peers
293  const PeerSet& peerSet = overlap_->peerSet();
294  typename PeerSet::const_iterator peerIt = peerSet.begin();
295  typename PeerSet::const_iterator peerEndIt = peerSet.end();
296  for (; peerIt != peerEndIt; ++peerIt) {
297  ProcessRank peerRank = *peerIt;
298 
299  sendEntries_(peerRank);
300  }
301 
302  // then, receive entries from the peers
303  peerIt = peerSet.begin();
304  for (; peerIt != peerEndIt; ++peerIt) {
305  ProcessRank peerRank = *peerIt;
306 
307  receiveCopyEntries_(peerRank);
308  }
309 
310  // finally, make sure that everything which we send was
311  // received by the peers
312  peerIt = peerSet.begin();
313  for (; peerIt != peerEndIt; ++peerIt) {
314  ProcessRank peerRank = *peerIt;
315  entryValuesSendBuff_[peerRank]->wait();
316  }
317  }
318 
319 private:
    // allocate the overlapping matrix and determine its sparsity
    // pattern from the native matrix plus the overlap
    template <class NativeBCRSMatrix>
    void build_(const NativeBCRSMatrix& nativeMatrix)
    {
        // the overlapping matrix is square: domestic rows x domestic columns
        size_t numDomestic = overlap_->numDomestic();

        // allocate the rows
        this->setSize(numDomestic, numDomestic);
        this->setBuildMode(ParentType::random);

        // communicate the entries
        buildIndices_(nativeMatrix);
    }
332 
    // determine the sparsity pattern of the overlapping matrix: the
    // entries of the native matrix plus the entries for the rows in the
    // overlap which are communicated by the peer processes
    template <class NativeBCRSMatrix>
    void buildIndices_(const NativeBCRSMatrix& nativeMatrix)
    {
        /////////
        // first, add all local matrix entries
        /////////
        entries_.resize(overlap_->numDomestic());
        for (unsigned nativeRowIdx = 0; nativeRowIdx < nativeMatrix.N(); ++nativeRowIdx) {
            int domesticRowIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeRowIdx));
            if (domesticRowIdx < 0)
                continue; // row corresponds to a black-listed entry

            auto nativeColIt = nativeMatrix[nativeRowIdx].begin();
            const auto& nativeColEndIt = nativeMatrix[nativeRowIdx].end();
            for (; nativeColIt != nativeColEndIt; ++nativeColIt) {
                int domesticColIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeColIt.index()));

                // make sure to include all off-diagonal entries, even those which belong
                // to DOFs which are managed by a peer process. For this, we have to
                // re-map the column index of the black-listed index to a native one.
                if (domesticColIdx < 0) {
                    domesticColIdx = overlap_->blackList().nativeToDomestic(static_cast<Index>(nativeColIt.index()));
                }

                if (domesticColIdx < 0)
                    continue;

                entries_[static_cast<unsigned>(domesticRowIdx)].insert(domesticColIdx);
            }
        }

        /////////
        // add the indices for all additional entries
        /////////

        // first, send all our indices to all peers
        const PeerSet& peerSet = overlap_->peerSet();
        typename PeerSet::const_iterator peerIt = peerSet.begin();
        typename PeerSet::const_iterator peerEndIt = peerSet.end();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            sendIndices_(nativeMatrix, peerRank);
        }

        // then receive all indices from the peers
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;
            receiveIndices_(peerRank);
        }

        // wait until all send operations are completed
        peerIt = peerSet.begin();
        for (; peerIt != peerEndIt; ++peerIt) {
            ProcessRank peerRank = *peerIt;

            numRowsSendBuff_[peerRank]->wait();
            rowSizesSendBuff_[peerRank]->wait();
            rowIndicesSendBuff_[peerRank]->wait();
            entryColIndicesSendBuff_[peerRank]->wait();

            // convert the global indices in the send buffers to domestic
            // ones
            globalToDomesticBuff_(*rowIndicesSendBuff_[peerRank]);
            globalToDomesticBuff_(*entryColIndicesSendBuff_[peerRank]);
        }

        /////////
        // actually initialize the BCRS matrix structure
        /////////

        // set the row sizes
        size_t numDomestic = overlap_->numDomestic();
        for (unsigned rowIdx = 0; rowIdx < numDomestic; ++rowIdx) {
            unsigned numCols = 0;
            const auto& colIndices = entries_[rowIdx];
            auto colIdxIt = colIndices.begin();
            const auto& colIdxEndIt = colIndices.end();
            for (; colIdxIt != colIdxEndIt; ++colIdxIt) {
                if (*colIdxIt < 0)
                    // the matrix for the local process does not know about this DOF
                    continue;

                ++numCols;
            }

            this->setrowsize(rowIdx, numCols);
        }
        this->endrowsizes();

        // set the indices
        for (unsigned rowIdx = 0; rowIdx < numDomestic; ++rowIdx) {
            const auto& colIndices = entries_[rowIdx];

            auto colIdxIt = colIndices.begin();
            const auto& colIdxEndIt = colIndices.end();
            for (; colIdxIt != colIdxEndIt; ++colIdxIt) {
                if (*colIdxIt < 0)
                    // the matrix for the local process does not know about this DOF
                    continue;

                this->addindex(rowIdx, static_cast<unsigned>(*colIdxIt));
            }
        }
        this->endindices();

        // free the memory occupied by the array of the matrix entries
        entries_.clear();
    }
442 
    // send the overlap indices to a peer
    //
    // NOTE(review): all MpiBuffer objects created here are allocated
    // with raw `new`; they are freed in the destructor.
    template <class NativeBCRSMatrix>
    void sendIndices_(const NativeBCRSMatrix& nativeMatrix, ProcessRank peerRank)
    {
#if HAVE_MPI
        // send size of foreign overlap to peer
        size_t numOverlapRows = overlap_->foreignOverlapSize(peerRank);
        numRowsSendBuff_[peerRank] = new MpiBuffer<unsigned>(1);
        (*numRowsSendBuff_[peerRank])[0] = static_cast<unsigned>(numOverlapRows);
        numRowsSendBuff_[peerRank]->send(peerRank);

        // allocate the buffers which hold the global indices of each row and the number
        // of entries which need to be communicated by the respective row
        rowIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
        rowSizesSendBuff_[peerRank] = new MpiBuffer<unsigned>(numOverlapRows);

        // compute the sets of the indices of the entries which need to be send to the peer
        typedef std::set<int> ColumnIndexSet;
        typedef std::map<int, ColumnIndexSet> EntryTuples;

        EntryTuples entryIndices;
        unsigned numEntries = 0; // <- total number of matrix entries to be send to the peer
        for (unsigned overlapOffset = 0; overlapOffset < numOverlapRows; ++overlapOffset) {
            Index domesticRowIdx = overlap_->foreignOverlapOffsetToDomesticIdx(peerRank, overlapOffset);
            Index nativeRowIdx = overlap_->domesticToNative(domesticRowIdx);
            Index globalRowIdx = overlap_->domesticToGlobal(domesticRowIdx);

            ColumnIndexSet& colIndices = entryIndices[globalRowIdx];

            auto nativeColIt = nativeMatrix[static_cast<unsigned>(nativeRowIdx)].begin();
            const auto& nativeColEndIt = nativeMatrix[static_cast<unsigned>(nativeRowIdx)].end();
            for (; nativeColIt != nativeColEndIt; ++nativeColIt) {
                unsigned nativeColIdx = static_cast<unsigned>(nativeColIt.index());
                Index domesticColIdx = overlap_->nativeToDomestic(static_cast<Index>(nativeColIdx));

                if (domesticColIdx < 0)
                    // the native column index may be blacklisted, use the corresponding
                    // index in the domestic overlap.
                    domesticColIdx = overlap_->blackList().nativeToDomestic(static_cast<Index>(nativeColIdx));

                if (domesticColIdx < 0)
                    // the column may still not be known locally, i.e. the corresponding
                    // DOF of the row is at the process's front. we don't need this
                    // entry.
                    continue;

                Index globalColIdx = overlap_->domesticToGlobal(domesticColIdx);
                colIndices.insert(globalColIdx);
                ++numEntries;
            }
        };

        // fill the send buffers
        entryColIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numEntries);
        Index overlapEntryIdx = 0;
        for (unsigned overlapOffset = 0; overlapOffset < numOverlapRows; ++overlapOffset) {
            Index domesticRowIdx = overlap_->foreignOverlapOffsetToDomesticIdx(peerRank, overlapOffset);
            Index globalRowIdx = overlap_->domesticToGlobal(domesticRowIdx);

            (*rowIndicesSendBuff_[peerRank])[overlapOffset] = globalRowIdx;

            const ColumnIndexSet& colIndexSet = entryIndices[globalRowIdx];
            auto* rssb = rowSizesSendBuff_[peerRank];
            (*rssb)[overlapOffset] = static_cast<unsigned>(colIndexSet.size());
            // append the column indices of this row to the flat entry buffer
            for (auto it = colIndexSet.begin(); it != colIndexSet.end(); ++it) {
                int globalColIdx = *it;

                (*entryColIndicesSendBuff_[peerRank])[static_cast<unsigned>(overlapEntryIdx)] = globalColIdx;
                ++ overlapEntryIdx;
            }
        }

        // actually communicate with the peer
        rowSizesSendBuff_[peerRank]->send(peerRank);
        rowIndicesSendBuff_[peerRank]->send(peerRank);
        entryColIndicesSendBuff_[peerRank]->send(peerRank);

        // create the send buffers for the values of the matrix
        // entries; they are filled and sent later by sendEntries_()
        entryValuesSendBuff_[peerRank] = new MpiBuffer<block_type>(numEntries);
#endif // HAVE_MPI
    }
525 
    // receive the overlap indices from a peer
    void receiveIndices_(ProcessRank peerRank)
    {
#if HAVE_MPI
        // receive the size of the peer's foreign overlap
        unsigned numOverlapRows;
        auto& numRowsRecvBuff = numRowsRecvBuff_[peerRank];
        numRowsRecvBuff.resize(1);
        numRowsRecvBuff.receive(peerRank);
        numOverlapRows = numRowsRecvBuff[0];

        // create receive buffer for the row sizes and receive them
        // from the peer
        rowSizesRecvBuff_[peerRank] = new MpiBuffer<unsigned>(numOverlapRows);
        rowIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
        rowSizesRecvBuff_[peerRank]->receive(peerRank);
        rowIndicesRecvBuff_[peerRank]->receive(peerRank);

        // calculate the total number of indices which are send by the
        // peer
        unsigned totalIndices = 0;
        for (unsigned i = 0; i < numOverlapRows; ++i)
            totalIndices += (*rowSizesRecvBuff_[peerRank])[i];

        // create the buffer to store the column indices of the matrix entries.
        // entryValuesRecvBuff_ is only allocated here; it is filled later by
        // receiveAddEntries_()/receiveCopyEntries_()
        entryColIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(totalIndices);
        entryValuesRecvBuff_[peerRank] = new MpiBuffer<block_type>(totalIndices);

        // communicate with the peer
        entryColIndicesRecvBuff_[peerRank]->receive(peerRank);

        // convert the global indices in the receive buffers to
        // domestic ones
        globalToDomesticBuff_(*rowIndicesRecvBuff_[peerRank]);
        globalToDomesticBuff_(*entryColIndicesRecvBuff_[peerRank]);

        // add the entries to the global entry map
        unsigned k = 0;
        for (unsigned i = 0; i < numOverlapRows; ++i) {
            Index domRowIdx = (*rowIndicesRecvBuff_[peerRank])[i];
            for (unsigned j = 0; j < (*rowSizesRecvBuff_[peerRank])[i]; ++j) {
                Index domColIdx = (*entryColIndicesRecvBuff_[peerRank])[k];
                entries_[static_cast<unsigned>(domRowIdx)].insert(domColIdx);
                ++k;
            }
        }
#endif // HAVE_MPI
    }
574 
575  void sendEntries_(ProcessRank peerRank)
576  {
577 #if HAVE_MPI
578  auto &mpiSendBuff = *entryValuesSendBuff_[peerRank];
579 
580  auto &mpiRowIndicesSendBuff = *rowIndicesSendBuff_[peerRank];
581  auto &mpiRowSizesSendBuff = *rowSizesSendBuff_[peerRank];
582  auto &mpiColIndicesSendBuff = *entryColIndicesSendBuff_[peerRank];
583 
584  // fill the send buffer
585  unsigned k = 0;
586  for (unsigned i = 0; i < mpiRowIndicesSendBuff.size(); ++i) {
587  Index domRowIdx = mpiRowIndicesSendBuff[i];
588 
589  for (Index j = 0; j < static_cast<Index>(mpiRowSizesSendBuff[i]); ++j)
590  {
591  // move to the next column which is in the overlap
592  Index domColIdx = mpiColIndicesSendBuff[k];
593 
594  // add the values of this column to the send buffer
595  mpiSendBuff[k] = (*this)[static_cast<unsigned>(domRowIdx)][static_cast<unsigned>(domColIdx)];
596  ++k;
597  }
598  }
599 
600  mpiSendBuff.send(peerRank);
601 #endif // HAVE_MPI
602  }
603 
604  void receiveAddEntries_(ProcessRank peerRank)
605  {
606 #if HAVE_MPI
607  auto &mpiRecvBuff = *entryValuesRecvBuff_[peerRank];
608 
609  auto &mpiRowIndicesRecvBuff = *rowIndicesRecvBuff_[peerRank];
610  auto &mpiRowSizesRecvBuff = *rowSizesRecvBuff_[peerRank];
611  auto &mpiColIndicesRecvBuff = *entryColIndicesRecvBuff_[peerRank];
612 
613  mpiRecvBuff.receive(peerRank);
614 
615  // retrieve the values from the receive buffer
616  unsigned k = 0;
617  for (unsigned i = 0; i < mpiRowIndicesRecvBuff.size(); ++i) {
618  Index domRowIdx = mpiRowIndicesRecvBuff[i];
619  for (unsigned j = 0; j < mpiRowSizesRecvBuff[i]; ++j, ++k) {
620  Index domColIdx = mpiColIndicesRecvBuff[k];
621 
622  if (domColIdx < 0)
623  // the matrix for the current process does not know about this DOF
624  continue;
625 
626  (*this)[static_cast<unsigned>(domRowIdx)][static_cast<unsigned>(domColIdx)] += mpiRecvBuff[k];
627  }
628  }
629 #endif // HAVE_MPI
630  }
631 
632  void receiveCopyEntries_(int peerRank)
633  {
634 #if HAVE_MPI
635  MpiBuffer<block_type> &mpiRecvBuff = *entryValuesRecvBuff_[peerRank];
636 
637  MpiBuffer<Index> &mpiRowIndicesRecvBuff = *rowIndicesRecvBuff_[peerRank];
638  MpiBuffer<unsigned> &mpiRowSizesRecvBuff = *rowSizesRecvBuff_[peerRank];
639  MpiBuffer<Index> &mpiColIndicesRecvBuff = *entryColIndicesRecvBuff_[peerRank];
640 
641  mpiRecvBuff.receive(peerRank);
642 
643  // retrieve the values from the receive buffer
644  unsigned k = 0;
645  for (unsigned i = 0; i < mpiRowIndicesRecvBuff.size(); ++i) {
646  Index domRowIdx = mpiRowIndicesRecvBuff[i];
647  for (unsigned j = 0; j < mpiRowSizesRecvBuff[i]; ++j, ++k) {
648  Index domColIdx = mpiColIndicesRecvBuff[k];
649 
650  if (domColIdx < 0)
651  // the matrix for the current process does not know about this DOF
652  continue;
653 
654  (*this)[static_cast<unsigned>(domRowIdx)][static_cast<unsigned>(domColIdx)] = mpiRecvBuff[k];
655  }
656  }
657 #endif // HAVE_MPI
658  }
659 
660  void globalToDomesticBuff_(MpiBuffer<Index>& idxBuff)
661  {
662  for (unsigned i = 0; i < idxBuff.size(); ++i)
663  idxBuff[i] = overlap_->globalToDomestic(idxBuff[i]);
664  }
665 
    // rank of the local process in MPI_COMM_WORLD (0 if MPI is not used)
    int myRank_;

    // scratch storage for the sparsity pattern; filled by
    // buildIndices_() and cleared again once the BCRS structure has
    // been initialized
    Entries entries_;

    // the algebraic overlap of the local process with its peers
    std::shared_ptr<Overlap> overlap_;

    // per-peer MPI send buffers for the sparsity pattern and the
    // matrix entries. NOTE(review): raw owning pointers, released in
    // the destructor
    std::map<ProcessRank, MpiBuffer<unsigned> *> numRowsSendBuff_;
    std::map<ProcessRank, MpiBuffer<unsigned> *> rowSizesSendBuff_;
    std::map<ProcessRank, MpiBuffer<Index> *> rowIndicesSendBuff_;
    std::map<ProcessRank, MpiBuffer<Index> *> entryColIndicesSendBuff_;
    std::map<ProcessRank, MpiBuffer<block_type> *> entryValuesSendBuff_;

    // per-peer MPI receive buffers. numRowsRecvBuff_ is held by value;
    // the others are raw owning pointers released in the destructor
    std::map<ProcessRank, MpiBuffer<unsigned> > numRowsRecvBuff_;
    std::map<ProcessRank, MpiBuffer<unsigned> *> rowSizesRecvBuff_;
    std::map<ProcessRank, MpiBuffer<Index> *> rowIndicesRecvBuff_;
    std::map<ProcessRank, MpiBuffer<Index> *> entryColIndicesRecvBuff_;
    std::map<ProcessRank, MpiBuffer<block_type> *> entryValuesRecvBuff_;
681 };
682 
683 } // namespace Linear
684 } // namespace Ewoms
685 
686 #endif
void assignCopy(const NativeBCRSMatrix &nativeMatrix)
Assign and synchronize the overlapping matrix from a non-overlapping one.
Definition: overlappingbcrsmatrix.hh:149
Definition: baseauxiliarymodule.hh:37
Expresses which degrees of freedom are blacklisted for the parallel linear solvers and which domestic...
Simplifies handling of buffers to be used in conjunction with MPI.
This class maps domestic row indices to and from "global" indices which is used to construct an algeb...
int Index
The type of an index of a degree of freedom.
Definition: overlaptypes.hh:44
void assignAdd(const NativeBCRSMatrix &nativeMatrix)
Assign and synchronize the overlapping matrix from a non-overlapping one.
Definition: overlappingbcrsmatrix.hh:133
An overlap aware block-compressed row storage (BCRS) matrix.
Definition: overlappingbcrsmatrix.hh:54
Expresses which degrees of freedom are blacklisted for the parallel linear solvers and which domestic...
Definition: blacklist.hh:47
const Overlap & overlap() const
Returns the domestic overlap for the process.
Definition: overlappingbcrsmatrix.hh:126
unsigned ProcessRank
The type of the rank of a process.
Definition: overlaptypes.hh:49
This class creates and manages the foreign overlap given an initial list of border indices and a BCRS...
This class creates and manages the foreign overlap given an initial list of border indices and a BCRS...
Definition: domesticoverlapfrombcrsmatrix.hh:52
void resetFront()
Set the identity matrix on the main diagonal of front indices.
Definition: overlappingbcrsmatrix.hh:161
std::list< BorderIndex > BorderList
This class manages a list of indices which are on the border of a process' partition of the grid...
Definition: overlaptypes.hh:120
A set of process ranks.
Definition: overlaptypes.hh:148