Skip to content

Commit

Permalink
Add GhostEntity for cell partitionType(). Remove the unused seperate …
Browse files Browse the repository at this point in the history
…copy/overlap
  • Loading branch information
andrthu committed Feb 27, 2020
1 parent db0d4e9 commit 2a345ea
Show file tree
Hide file tree
Showing 6 changed files with 30 additions and 122 deletions.
122 changes: 14 additions & 108 deletions opm/grid/common/GridPartitioning.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -242,97 +242,6 @@ namespace Dune
}
}

/// \brief seperate overlap and ghost cells
void seperateOverlapAndGhostCells(const CpGrid& grid, const std::vector<int>& cell_has_well,
std::vector<int>& part_type, int layers)
{

auto lid = grid.localIdSet();
auto gid = grid.globalIdSet();
part_type.resize(grid.numCells(), 0);

for (auto it = grid.leafbegin<0>(); it != grid.leafend<0>(); ++it) {
auto elem = *it;

if (elem.partitionType() == InteriorEntity) {
auto id = lid.id(elem);

part_type[id] = 1;

for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
auto inter = *iit;
if ( inter.neighbor() ) {
auto nab = inter.outside();
int nab_lid = lid.id(nab);
if (nab.partitionType() != InteriorEntity && part_type[nab_lid] == 0) {
int nab_gid = gid.id(nab);

if ( cell_has_well[nab_gid] == 1 ) {
part_type[nab_lid] = layers + 1;
}
else {
part_type[nab_lid] = 2;
}
}
}
}
}
}

int layer = 2;
while (layer < layers + 1) {
for (auto it = grid.leafbegin<0>(); it != grid.leafend<0>(); ++it) {

auto elem = *it;
int id = lid.id(elem);
bool isLayer = part_type[id] == layer || part_type[id] == layers + 1;

if (elem.partitionType() != InteriorEntity && isLayer) {
for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {

auto inter = *iit;
if ( inter.neighbor() ) {

auto nab = inter.outside();
int nab_gid = gid.id(nab);
int nab_lid = lid.id(nab);

if (nab.partitionType() != InteriorEntity && part_type[nab_lid] == 0) {
if (cell_has_well[nab_gid] == 1) {
part_type[nab_lid] = layers + 1;
}
else {
part_type[nab_lid] = layer + 1;
}
}
}
}
}
/*
else if (elem.partitionType() != InteriorEntity && part_type[id] == layers + 1) {
int gid_e = gid.id(elem);
bool isWell = cell_has_well[gid_e] == 1;
if ( isWell ) {
for (CpGrid::LeafIntersectionIterator iit = elem.ileafbegin(); iit != elem.ileafend(); ++iit) {
auto inter = *iit;
if ( inter.neighbor() ) {
auto nab = inter.outside();
int nab_gid = gid.id(nab);
int nab_lid = lid.id(nab);
if (nab.partitionType() != InteriorEntity) {
part_type[nab_lid] = layers + 1;
}
}
}
}
}
*/
}
layer++;
}
}

/// \brief Adds cells to the overlap that just share a point with an owner cell.
void addOverlapCornerCell(const CpGrid& grid, int owner,
const CpGrid::Codim<0>::Entity& from,
Expand Down Expand Up @@ -499,9 +408,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
auto ownerSize = exportList.size();
const CpGrid::LeafIndexSet& ix = grid.leafIndexSet();
std::map<int,int> exportProcs, importProcs;

std::vector<std::tuple<int,int,char>> ghostList;


for (CpGrid::Codim<0>::LeafIterator it = grid.leafbegin<0>();
it != grid.leafend<0>(); ++it) {
int index = ix.index(*it);
Expand Down Expand Up @@ -544,7 +451,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
// Remove duplicate cells in overlap layer.
auto newEnd = std::unique(ownerEnd, exportList.end(), overlapEqual);
exportList.resize(newEnd - exportList.begin());

for(const auto& entry: importList)
importProcs.insert(std::make_pair(std::get<1>(entry), 0));
//count entries to send
Expand Down Expand Up @@ -596,24 +503,24 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti

MPI_Waitall(requests.size(), requests.data(), statuses.data());

// -------------- Communicate overlap type
// Communicate overlap type
++tag;
std::vector<std::vector<int> > typeBuffers(importProcs.size());
auto tbuffer = typeBuffers.begin();
auto partitionTypeBuffer = typeBuffers.begin();
req = requests.begin();

for(auto&& proc: importProcs)
{
tbuffer->resize(proc.second);
MPI_Irecv(tbuffer->data(), proc.second, MPI_INT, proc.first, tag, cc, &(*req));
++req; ++tbuffer;
partitionTypeBuffer->resize(proc.second);
MPI_Irecv(partitionTypeBuffer->data(), proc.second, MPI_INT, proc.first, tag, cc, &(*req));
++req; ++partitionTypeBuffer;
}

for(const auto& proc: exportProcs)
{
std::vector<int> sendBuffer;
sendBuffer.reserve(proc.second);

for (auto t = ownerEnd; t != exportList.end(); ++t) {
if ( std::get<1>(*t) == proc.first ) {
if ( std::get<2>(*t) == AttributeSet::copy)
Expand All @@ -622,21 +529,20 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
sendBuffer.push_back(1);
}
}

MPI_Send(sendBuffer.data(), proc.second, MPI_INT, proc.first, tag, cc);
}

std::inplace_merge(exportList.begin(), ownerEnd, exportList.end());
MPI_Waitall(requests.size(), requests.data(), statuses.data());
// ------------------------------


// Add the overlap layer to the import list on each process.
buffer = receiveBuffers.begin();
tbuffer = typeBuffers.begin();
partitionTypeBuffer = typeBuffers.begin();
auto importOwnerSize = importList.size();

for(const auto& proc: importProcs)
{
auto pt = tbuffer->begin();
auto pt = partitionTypeBuffer->begin();
for(const auto& index: *buffer) {

if (*pt == 0) {
Expand All @@ -648,7 +554,7 @@ void addOverlapLayer(const CpGrid& grid, int index, const CpGrid::Codim<0>::Enti
++pt;
}
++buffer;
++tbuffer;
++partitionTypeBuffer;
}
std::sort(importList.begin() + importOwnerSize, importList.end(),
[](const std::tuple<int,int,char,int>& t1, const std::tuple<int,int,char,int>& t2)
Expand Down
10 changes: 4 additions & 6 deletions opm/grid/common/GridPartitioning.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,6 @@
#include <tuple>

#include <dune/common/parallel/mpihelper.hh>

namespace Dune
{

Expand Down Expand Up @@ -72,9 +71,6 @@ namespace Dune
bool recursive = false,
bool ensureConnectivity = true);

void seperateOverlapAndGhostCells(const CpGrid& grid, const std::vector<int>& cell_has_well,
std::vector<int>& part_type, int layers);

/// \brief Adds a layer of overlap cells to a partitioning.
/// \param[in] grid The grid that is partitioned.
/// \param[in] cell_part a vector containing each cells partition number.
Expand All @@ -96,13 +92,15 @@ namespace Dune
/// \param[inout] importList List indices to import, each entry is a tuple
/// of global index, process rank (to import from), attribute here, local
/// index here
/// \param[in] cell_has_well integer list that indicates whether cell i is perforated
/// by a well.
/// \param[in] cc The communication object
/// \param[in] layer Number of overlap layers
int addOverlapLayer(const CpGrid& grid,
const std::vector<int>& cell_part,
const std::vector<int>& cell_part,
std::vector<std::tuple<int,int,char>>& exportList,
std::vector<std::tuple<int,int,char,int>>& importList,
const std::vector<int>& cell_has_well,
const std::vector<int>& cell_has_well,
const CollectiveCommunication<Dune::MPIHelper::MPICommunicator>& cc,
int layers = 1);

Expand Down
4 changes: 2 additions & 2 deletions opm/grid/common/ZoltanPartition.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
wells,
transmissibilities,
partitionIsEmpty,
edgeWeightsMethod));
edgeWeightsMethod));
Dune::cpgrid::setCpGridZoltanGraphFunctions(zz, *grid_and_wells,
partitionIsEmpty);
}
Expand All @@ -104,7 +104,7 @@ zoltanGraphPartitionGridOnRoot(const CpGrid& cpgrid,
int rank = cc.rank();
std::vector<int> parts(size, rank);
std::vector<std::vector<int> > wells_on_proc;
// List entry: process to export to, (global) index, attribute there (not needed?)
// List entry: (global) index, process to export to, attribute there
std::vector<std::tuple<int,int,char>> myExportList(numExport);
// List entry: process to import from, global index, attribute here, local index
// (determined later)
Expand Down
9 changes: 6 additions & 3 deletions opm/grid/common/ZoltanPartition.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,13 @@ namespace cpgrid
/// @param edgeWeightMethod The method used to calculate the weights associated
/// with the edges of the graph (uniform, transmissibilities, log thereof)
/// @param root The process number that holds the global grid.
/// @return A pair consisting of a vector that contains for each local cell of the grid the
/// @return A tuple consisting of a vector that contains for each local cell of the grid the
/// number of the process that owns it after repartitioning,
/// and a set of names of wells that should be defunct in a parallel
/// simulation.
/// a set of names of wells that should be defunct in a parallel
/// simulation, a vector of tuples exportList(global id, owner rank, attribute)
/// containing information each rank needs for distributing the grid and a second
/// vector of tuples importList(global id, send rank, attribute, local id) containing
/// information about the cells of the grid each rank will receive.
std::tuple<std::vector<int>,std::unordered_set<std::string>,
std::vector<std::tuple<int,int,char> >,
std::vector<std::tuple<int,int,char,int> > >
Expand Down
2 changes: 1 addition & 1 deletion opm/grid/cpgrid/CpGrid.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,7 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellTy
<< " Maybe scatterGrid was called before?"<<std::endl;
return std::make_pair(false, std::unordered_set<std::string>());
}

#if HAVE_MPI
auto& cc = data_->ccobj_;

Expand Down Expand Up @@ -214,7 +215,6 @@ CpGrid::scatterGrid(EdgeWeightMethod method, const std::vector<cpgrid::OpmWellTy
return std::get<0>(t1) < std::get<0>(t2);
};


if ( ! ownersFirst )
{
// merge owner and overlap sorted by global index
Expand Down
5 changes: 3 additions & 2 deletions opm/grid/cpgrid/CpGridData.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1574,9 +1574,10 @@ void CpGridData::distributeGlobalGrid(CpGrid& grid,
partition_type_indicator_->cell_indicator_.resize(cell_indexset_.size());
for(const auto i: cell_indexset_)
{
auto ci_attr = i.local().attribute();
partition_type_indicator_->cell_indicator_[i.local()]=
i.local().attribute()==AttributeSet::owner?
InteriorEntity:OverlapEntity;
ci_attr==AttributeSet::owner?
InteriorEntity: ci_attr==AttributeSet::copy? GhostEntity:OverlapEntity;
}

// Compute partition type for points
Expand Down

0 comments on commit 2a345ea

Please sign in to comment.