Enable multiple layers of overlap for Restricted Additive Schwarz #449

Open · wants to merge 2 commits into base: master
12 changes: 12 additions & 0 deletions opm/grid/CpGrid.hpp
@@ -1327,6 +1327,11 @@ namespace Dune
return current_view_data_->cell_remote_indices_;
}

std::vector<int> cellHasWell() const
{
return *cell_has_well_;
}

#endif

private:
@@ -1366,6 +1371,13 @@ namespace Dune
* @warning Will only update owner cells
*/
std::shared_ptr<InterfaceMap> point_scatter_gather_interfaces_;

/// \brief Mark all cells perforated by wells.
void findWellperforatedCells(const std::vector<cpgrid::OpmWellType> * wells,
std::vector<int>& cell_has_well);

std::shared_ptr<std::vector<int>> cell_has_well_;

}; // end Class CpGrid


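A minimal usage sketch, not part of the patch: in an MPI build, the new cellHasWell() accessor returns one 0/1 flag per cell, so counting the locally known well-perforated cells is a one-liner. The helper name countPerforatedCells is made up for illustration.

#include <opm/grid/CpGrid.hpp>

#include <numeric>
#include <vector>

// Hypothetical helper; assumes the grid has already been distributed via
// scatterGrid(), which fills cell_has_well_.
int countPerforatedCells(const Dune::CpGrid& grid)
{
    const std::vector<int> flags = grid.cellHasWell(); // 1 = perforated by a well
    return std::accumulate(flags.begin(), flags.end(), 0);
}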
126 changes: 93 additions & 33 deletions opm/grid/common/GridPartitioning.cpp
@@ -364,7 +364,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Entity& e,
void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Entity& e,
const int owner, const std::vector<int>& cell_part,
std::vector<std::tuple<int,int,char>>& exportList,
int recursion_deps)
const std::vector<int>& isWell, int recursion_deps)
{
using AttributeSet = Dune::OwnerOverlapCopyAttributeSet::AttributeSet;
const CpGrid::LeafIndexSet& ix = grid.leafIndexSet();
@@ -373,38 +373,33 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Entity& e,
int nb_index = ix.index(*(iit->outside()));
if ( cell_part[nb_index]!=owner )
{
// Note: multiple adds for same process are possible
exportList.emplace_back(nb_index, owner, AttributeSet::copy);
exportList.emplace_back(index, cell_part[nb_index], AttributeSet::copy);
if ( recursion_deps>0 )
{
// Add another layer
addOverlapLayer(grid, nb_index, *(iit->outside()), owner,
cell_part, exportList, recursion_deps-1);
// On the last level add the cell with type copy; otherwise it gets partition type overlap.
if ( recursion_deps < 1 ) {
exportList.emplace_back(nb_index, owner, AttributeSet::copy);
}
else
else {
// If the cell is perforated by a well, add it as a copy cell, since wells are not parallelized
if ( isWell[nb_index] == 1 )
exportList.emplace_back(nb_index, owner, AttributeSet::copy);
else
exportList.emplace_back(nb_index, owner, AttributeSet::overlap);
}
if ( recursion_deps > 0 )
{
// Add cells to the overlap that just share a corner with e.
for (CpGrid::LeafIntersectionIterator iit2 = iit->outside()->ileafbegin();
iit2 != iit->outside()->ileafend(); ++iit2)
{
if ( iit2->neighbor() )
{
int nb_index2 = ix.index(*(iit2->outside()));
if( cell_part[nb_index2]==owner ) continue;
addOverlapCornerCell(grid, owner, e, *(iit2->outside()),
cell_part, exportList);
}
}
// Add another layer
addOverlapLayer(grid, index, *(iit->outside()), owner,
cell_part, exportList, isWell, recursion_deps-1);
}
}
}
}
}

int addOverlapLayer(const CpGrid& grid, const std::vector<int>& cell_part,
int addOverlapLayer(const CpGrid& grid,
const std::vector<int>& cell_part,
std::vector<std::tuple<int,int,char>>& exportList,
std::vector<std::tuple<int,int,char,int>>& importList,
const std::vector<int>& cell_has_well,
const CollectiveCommunication<Dune::MPIHelper::MPICommunicator>& cc,
int layers)
{
@@ -417,22 +412,44 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Entity& e,
for (CpGrid::Codim<0>::LeafIterator it = grid.leafbegin<0>();
it != grid.leafend<0>(); ++it) {
int index = ix.index(*it);

auto owner = cell_part[index];
exportProcs.insert(std::make_pair(owner, 0));
addOverlapLayer(grid, index, *it, owner, cell_part, exportList, layers-1);
addOverlapLayer(grid, index, *it, owner, cell_part, exportList, cell_has_well, layers-1);
}

// remove multiple entries
// This predicate results in the following ordering of
// the exportList tuples (index, rank, attribute) after std::sort:
//
// Tuple x comes before tuple y if x[0] < y[0].
// Tuple x comes before tuple y if x[0] == y[0] and x[1] < y[1].
// Tuple x comes before tuple y if x[0] == y[0], x[1] == y[1], x[2] == overlap and y[2] == copy.
// This ordering is needed to make std::unique work properly.
auto compare = [](const std::tuple<int,int,char>& t1, const std::tuple<int,int,char>& t2)
{
return (std::get<0>(t1) < std::get<0>(t2)) ||
( ! (std::get<0>(t2) < std::get<0>(t1))
&& (std::get<1>(t1) < std::get<1>(t2)) );
};
{
return (std::get<0>(t1) < std::get<0>(t2) ) ||
( (std::get<0>(t2) == std::get<0>(t1))
&& (std::get<1>(t1) < std::get<1>(t2)) ) ||
( (std::get<0>(t2) == std::get<0>(t1)) && std::get<2>(t1) == AttributeSet::overlap
&& std::get<2>(t2) == AttributeSet::copy && (std::get<1>(t1) == std::get<1>(t2)));
};

// We do not want to add a cell twice, once with overlap and once with copy type.
// To avoid this, two tuples x, y in exportList are considered equal if
// x[0] == y[0] and x[1] == y[1]. Since overlap tuples come before copy tuples,
// the duplicate copy tuples are the ones removed.
auto overlapEqual = [](const std::tuple<int,int,char>& t1, const std::tuple<int,int,char>& t2)
{
return ( ( std::get<0>(t1) == std::get<0>(t2) )
&& ( std::get<1>(t1) == std::get<1>(t2) ) ) ;
};

// We only sort and remove duplicate cells in the overlap layers
auto ownerEnd = exportList.begin() + ownerSize;
std::sort(ownerEnd, exportList.end(), compare);

auto newEnd = std::unique(ownerEnd, exportList.end());
// Remove duplicate cells in overlap layer.
auto newEnd = std::unique(ownerEnd, exportList.end(), overlapEqual);
exportList.resize(newEnd - exportList.begin());

for(const auto& entry: importList)
@@ -484,17 +501,60 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Entity& e,
MPI_Send(sendBuffer.data(), proc.second, MPI_INT, proc.first, tag, cc);
}

std::inplace_merge(exportList.begin(), ownerEnd, exportList.end());
MPI_Waitall(requests.size(), requests.data(), statuses.data());

// Communicate overlap type
++tag;
std::vector<std::vector<int> > typeBuffers(importProcs.size());
auto partitionTypeBuffer = typeBuffers.begin();
req = requests.begin();

for(auto&& proc: importProcs)
{
partitionTypeBuffer->resize(proc.second);
MPI_Irecv(partitionTypeBuffer->data(), proc.second, MPI_INT, proc.first, tag, cc, &(*req));
++req; ++partitionTypeBuffer;
}

for(const auto& proc: exportProcs)
{
std::vector<int> sendBuffer;
sendBuffer.reserve(proc.second);

for (auto t = ownerEnd; t != exportList.end(); ++t) {
if ( std::get<1>(*t) == proc.first ) {
if ( std::get<2>(*t) == AttributeSet::copy)
sendBuffer.push_back(0);
else
sendBuffer.push_back(1);
}
}
MPI_Send(sendBuffer.data(), proc.second, MPI_INT, proc.first, tag, cc);
}

std::inplace_merge(exportList.begin(), ownerEnd, exportList.end());
MPI_Waitall(requests.size(), requests.data(), statuses.data());

// Add the overlap layer to the import list on each process.
buffer = receiveBuffers.begin();
partitionTypeBuffer = typeBuffers.begin();
auto importOwnerSize = importList.size();

for(const auto& proc: importProcs)
{
for(const auto& index: *buffer)
importList.emplace_back(index, proc.first, AttributeSet::copy, -1);
auto pt = partitionTypeBuffer->begin();
for(const auto& index: *buffer) {

if (*pt == 0) {
importList.emplace_back(index, proc.first, AttributeSet::copy, -1);
}
else {
importList.emplace_back(index, proc.first, AttributeSet::overlap, -1);
}
++pt;
}
++buffer;
++partitionTypeBuffer;
}
std::sort(importList.begin() + importOwnerSize, importList.end(),
[](const std::tuple<int,int,char,int>& t1, const std::tuple<int,int,char,int>& t2)
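To make the new deduplication logic concrete, here is a self-contained sketch (not from the patch) of the same sort-then-unique pass on dummy data. The enum stands in for Dune::OwnerOverlapCopyAttributeSet::AttributeSet, and the predicates are written in an equivalent, slightly more readable form.

#include <algorithm>
#include <cstdio>
#include <tuple>
#include <vector>

// Stand-in for Dune::OwnerOverlapCopyAttributeSet::AttributeSet.
enum Attribute : char { owner = 1, overlap = 2, copy = 3 };

int main()
{
    using Entry = std::tuple<int,int,char>; // (global index, rank, attribute)
    std::vector<Entry> exportList = {
        {7, 1, copy}, {7, 1, overlap}, {3, 2, copy}, {7, 2, copy}, {3, 2, copy}
    };

    // Order by index, then rank; for identical (index, rank) pairs put the
    // overlap entry before the copy entry, as the patch's compare does.
    auto compare = [](const Entry& t1, const Entry& t2) {
        if (std::get<0>(t1) != std::get<0>(t2))
            return std::get<0>(t1) < std::get<0>(t2);
        if (std::get<1>(t1) != std::get<1>(t2))
            return std::get<1>(t1) < std::get<1>(t2);
        return std::get<2>(t1) == overlap && std::get<2>(t2) == copy;
    };
    // Entries are "equal" when index and rank match, regardless of attribute,
    // so std::unique keeps the first of each run and overlap wins over copy.
    auto overlapEqual = [](const Entry& t1, const Entry& t2) {
        return std::get<0>(t1) == std::get<0>(t2)
            && std::get<1>(t1) == std::get<1>(t2);
    };

    std::sort(exportList.begin(), exportList.end(), compare);
    exportList.erase(std::unique(exportList.begin(), exportList.end(), overlapEqual),
                     exportList.end());

    // Leaves (3,2,copy), (7,1,overlap) and (7,2,copy): the duplicate of cell 3
    // is gone, and cell 7 keeps its overlap attribute for rank 1.
    for (const auto& e : exportList)
        std::printf("index %d -> rank %d (attribute %d)\n",
                    std::get<0>(e), std::get<1>(e), int(std::get<2>(e)));
}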
6 changes: 5 additions & 1 deletion opm/grid/common/GridPartitioning.hpp
@@ -92,11 +92,15 @@ namespace Dune
/// \param[inout] importList List of indices to import, each entry is a tuple
/// of global index, process rank (to import from), attribute here, local
/// index here
/// \param[in] cell_has_well integer list indicating whether cell i is perforated
/// by a well.
/// \param[in] cc The communication object
/// \param[in] layers Number of overlap layers
int addOverlapLayer(const CpGrid& grid, const std::vector<int>& cell_part,
int addOverlapLayer(const CpGrid& grid,
const std::vector<int>& cell_part,
std::vector<std::tuple<int,int,char>>& exportList,
std::vector<std::tuple<int,int,char,int>>& importList,
const std::vector<int>& cell_has_well,
const CollectiveCommunication<Dune::MPIHelper::MPICommunicator>& cc,
int layers = 1);

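A hedged call-site sketch for the extended signature; grid, cell_part and cc are assumed to exist as in CpGrid::scatterGrid(), and only the parameter order is taken from the declaration above.

// Not part of the patch: exportList/importList start out holding the owner
// cells, cell_has_well carries one 0/1 flag per cell, and layers > 1 requests
// a wider overlap for Restricted Additive Schwarz.
std::vector<std::tuple<int,int,char>> exportList;     // owner cells added first
std::vector<std::tuple<int,int,char,int>> importList; // owner cells added first
std::vector<int> cell_has_well(grid.numCells(), 0);
// ... mark the perforated cells in cell_has_well ...
const int importedOwners =
    Dune::addOverlapLayer(grid, cell_part, exportList, importList,
                          cell_has_well, cc, /*layers=*/2);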
4 changes: 2 additions & 2 deletions opm/grid/common/ZoltanPartition.cpp
@@ -77,7 +77,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
wells,
transmissibilities,
partitionIsEmpty,
edgeWeightsMethod));
edgeWeightsMethod));
Dune::cpgrid::setCpGridZoltanGraphFunctions(zz, *grid_and_wells,
partitionIsEmpty);
}
@@ -104,7 +104,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
int rank = cc.rank();
std::vector<int> parts(size, rank);
std::vector<std::vector<int> > wells_on_proc;
// List entry: process to export to, (global) index, attribute there (not needed?)
// List entry: (global) index, process to export to, attribute there
std::vector<std::tuple<int,int,char>> myExportList(numExport);
// List entry: process to import from, global index, attribute here, local index
// (determined later)
9 changes: 6 additions & 3 deletions opm/grid/common/ZoltanPartition.hpp
@@ -46,10 +46,13 @@ namespace cpgrid
/// @param edgeWeightMethod The method used to calculate the weights associated
/// with the edges of the graph (uniform, transmissibilities, log thereof)
/// @param root The process number that holds the global grid.
/// @return A pair consisting of a vector that contains for each local cell of the grid the
/// @return A tuple consisting of a vector that contains for each local cell of the grid the
/// number of the process that owns it after repartitioning,
/// and a set of names of wells that should be defunct in a parallel
/// simulation.
/// a set of names of wells that should be defunct in a parallel
/// simulation, a vector of tuples exportList(global id, owner rank, attribute)
/// containing information each rank needs for distributing the grid and a second
/// vector of tuples importList(global id, send rank, attribute, local id) containing
/// information about the cells of the grid each rank will receive.
std::tuple<std::vector<int>,std::unordered_set<std::string>,
std::vector<std::tuple<int,int,char> >,
std::vector<std::tuple<int,int,char,int> > >
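A hedged sketch of consuming the four-element return value documented above; the argument list is abbreviated and assumed here, only the tuple layout comes from this header.

auto result = zoltanGraphPartitionGridOnRoot(cpgrid, wells, transmissibilities,
                                             cc, edgeWeightsMethod, root);
const std::vector<int>& parts = std::get<0>(result); // owner rank per local cell
const std::unordered_set<std::string>& defunctWells = std::get<1>(result);
const auto& exportList = std::get<2>(result); // (global id, owner rank, attribute)
const auto& importList = std::get<3>(result); // (global id, send rank, attribute, local id)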
39 changes: 36 additions & 3 deletions opm/grid/cpgrid/CpGrid.cpp
@@ -195,13 +195,19 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellType>
// 0);
// }
#endif

using AttributeSet = Dune::OwnerOverlapCopyAttributeSet::AttributeSet;

bool ownersFirst = false;

// first create the overlap

this->cell_has_well_.reset( new std::vector<int> );
this->findWellperforatedCells(wells, *this->cell_has_well_);

// map from process to global cell indices in overlap
std::map<int,std::set<int> > overlap;
auto noImportedOwner = addOverlapLayer(*this, cell_part, exportList, importList, cc);
auto noImportedOwner = addOverlapLayer(*this, cell_part, exportList, importList,
*this->cell_has_well_, cc, overlapLayers);
// importList contains all the indices that will be here.
auto compareImport = [](const std::tuple<int,int,char,int>& t1,
const std::tuple<int,int,char,int>&t2)
@@ -236,7 +242,10 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellType>
distributed_data_->cell_indexset_.beginResize();
for(const auto& entry: importList)
{
distributed_data_->cell_indexset_.add(std::get<0>(entry), ParallelIndexSet::LocalIndex(std::get<3>(entry), AttributeSet(std::get<2>(entry)), true));
distributed_data_->cell_indexset_.add(std::get<0>(entry),
ParallelIndexSet::LocalIndex(std::get<3>(entry),
AttributeSet(std::get<2>(entry)),
true));
}
distributed_data_->cell_indexset_.endResize();
// add an interface for gathering/scattering data with communication
@@ -272,6 +281,30 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellType>
#endif
}

void CpGrid::findWellperforatedCells(const std::vector<cpgrid::OpmWellType> * wells,
std::vector<int>& cell_has_well)
{
cell_has_well.resize(this->numCells(), 0);
cpgrid::WellConnections well_indices;
const auto& cpgdim = this->logicalCartesianSize();

std::vector<int> cartesian_to_compressed(cpgdim[0]*cpgdim[1]*cpgdim[2], -1);

for( int i=0; i < this->numCells(); ++i )
{
cartesian_to_compressed[this->globalCell()[i]] = i;
}
well_indices.init(*wells, cpgdim, cartesian_to_compressed);

for(const auto& well: well_indices)
{
for( auto well_idx = well.begin(); well_idx != well.end();
++well_idx)
{
cell_has_well[*well_idx] = 1;
}
}
}

void CpGrid::createCartesian(const std::array<int, 3>& dims,
const std::array<double, 3>& cellsize)
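The only subtle step in findWellperforatedCells is the Cartesian-to-compressed index translation, so here is a standalone, runnable sketch of it with made-up data (a 2x2x2 logical grid in which three cells are inactive):

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // globalCell() maps each compressed (active) cell to its Cartesian index;
    // the inverse table lets well perforations, given as Cartesian indices,
    // be flagged on the compressed grid.
    const int cartesianSize = 8;                   // 2x2x2 logical grid
    std::vector<int> globalCell = {0, 1, 2, 5, 7}; // active cells only
    std::vector<int> perforations = {5, 7};        // Cartesian indices of a well

    std::vector<int> cartesianToCompressed(cartesianSize, -1);
    for (std::size_t i = 0; i < globalCell.size(); ++i)
        cartesianToCompressed[globalCell[i]] = static_cast<int>(i);

    std::vector<int> cellHasWell(globalCell.size(), 0);
    for (int cart : perforations) {
        const int compressed = cartesianToCompressed[cart];
        if (compressed != -1) // perforated cell is active
            cellHasWell[compressed] = 1;
    }

    for (std::size_t i = 0; i < cellHasWell.size(); ++i)
        std::printf("compressed cell %zu: %s\n", i,
                    cellHasWell[i] ? "perforated" : "no well");
}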
5 changes: 3 additions & 2 deletions opm/grid/cpgrid/CpGridData.cpp
@@ -1574,9 +1574,10 @@ void CpGridData::distributeGlobalGrid(CpGrid& grid,
partition_type_indicator_->cell_indicator_.resize(cell_indexset_.size());
for(const auto i: cell_indexset_)
{
auto ci_attr = i.local().attribute();
partition_type_indicator_->cell_indicator_[i.local()]=
i.local().attribute()==AttributeSet::owner?
InteriorEntity:OverlapEntity;
ci_attr==AttributeSet::owner?
InteriorEntity: ci_attr==AttributeSet::copy? GhostEntity:OverlapEntity;
}

// Compute partition type for points
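For reference, a standalone sketch (enum values are stand-ins for the Dune types) of the new three-way mapping: owner cells stay Interior, cells imported with the copy attribute now become Ghost, and the rest of the overlap remains Overlap.

#include <cstdio>

enum class Attribute { owner, overlap, copy };
enum class PartitionType { InteriorEntity, OverlapEntity, GhostEntity };

// Mirrors the ternary chain in distributeGlobalGrid().
PartitionType cellPartitionType(Attribute a)
{
    return a == Attribute::owner ? PartitionType::InteriorEntity
         : a == Attribute::copy  ? PartitionType::GhostEntity
                                 : PartitionType::OverlapEntity;
}

int main()
{
    std::printf("owner -> %d, overlap -> %d, copy -> %d\n",
                static_cast<int>(cellPartitionType(Attribute::owner)),
                static_cast<int>(cellPartitionType(Attribute::overlap)),
                static_cast<int>(cellPartitionType(Attribute::copy)));
}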