mpi: implement experimental program for data transfer algorithms used for non-local operations

This commit is contained in:
Christian Zimmermann 2024-03-28 00:56:22 +01:00
parent e65635cb0e
commit ec30ad5839
3 changed files with 197 additions and 0 deletions
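The experiment stages every block a rank needs from other ranks into per-destination send buffers and then exchanges them with one MPI_Sendrecv per round of a ring schedule. As a reader's orientation, the following is a minimal self-contained sketch of just that exchange pattern; the rank count, buffer layout, and variable names are illustrative and not part of the commit:

// Minimal sketch of the ring-style MPI_Sendrecv exchange used below
// (illustrative only, not part of the commit).
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0, nranks = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    // One value per destination rank; in the real program these are
    // variable-sized blocks of grid data.
    std::vector<double> send(nranks, static_cast<double>(rank));
    std::vector<double> recv(nranks, 0.0);

    // Round o: send to (rank+o), receive from (rank-o), both mod nranks.
    // Each send is matched by the receive posted in the same round on the
    // partner rank, so the exchange cannot deadlock.
    for(int o = 0; o != nranks; ++o){
        const int dst = (rank + o) % nranks;
        const int src = (rank - o + nranks) % nranks;
        MPI_Status stat;
        MPI_Sendrecv(send.data() + dst, 1, MPI_DOUBLE, dst, 0,
                     recv.data() + src, 1, MPI_DOUBLE, src, 0,
                     MPI_COMM_WORLD, &stat);
    }

    // Every rank now holds the value of every other rank in recv[src].
    MPI_Finalize();
    return 0;
}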

View file

@@ -212,6 +212,9 @@ namespace CNORXZ
     template <class Index1, class Index2>
     void RCArray<T>::load(const Sptr<Index1>& i1, const Sptr<Index2>& i2) const
     {
+        VCHECK(i1->lex());
+        VCHECK(i2->lex());
+        /*
         const SizeT rsize = getRankedSize(mGeom);
         if(mMap.size() != rsize){
             mMap.resize(rsize);
@@ -228,6 +231,7 @@ namespace CNORXZ
             if(recvr == getRankNumber()) { }
         }, pos(i1), pos(i2) ) );
         // MPI_Sendrecv()!!!
+        */
     }
 } // namespace mpi

View file

@@ -13,3 +13,7 @@ add_dependencies(mpirautest cnorxz cnorxzmpi test_lib)
 target_link_libraries(mpirautest ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${MPI_LIBS} cnorxz cnorxzmpi test_lib)
 set(MPI_TEST_COMMAND mpirun -n 4 mpirautest)
 add_test(NAME mpirautest COMMAND ${MPI_TEST_COMMAND})
+add_executable(riexp rindex_exp.cc)
+add_dependencies(riexp cnorxz cnorxzmpi test_lib)
+target_link_libraries(riexp ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${MPI_LIBS} cnorxz cnorxzmpi test_lib)
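The new riexp executable is built alongside the existing MPI test but, unlike mpirautest, is not registered via add_test. Since the program asserts getNumRanks() == 4, it is presumably launched the same way as the test binary, e.g. `mpirun -n 4 riexp`.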

View file

@@ -0,0 +1,189 @@
// -*- C++ -*-
// experiment...

#include <cstdlib>
#include <iostream>

#include "cnorxz.h"
#include "cnorxz_mpi.h"
#include "test_numbers.h"
#include "rrange.cc.h"

namespace
{
    using namespace CNORXZ;
    using Test::Numbers;
    using namespace CNORXZ::mpi;

    class Env
    {
    public:
        Env()
        {
            CXZ_ASSERT(getNumRanks() == 4, "expected 4 ranks");
            Vector<Int> xs(12);
            Vector<Int> ts(16);
            for(SizeT i = 0; i != xs.size(); ++i){
                const Int x = static_cast<Int>(i) - static_cast<Int>(xs.size()/2);
                xs[i] = x;
            }
            for(SizeT i = 0; i != ts.size(); ++i){
                const Int t = static_cast<Int>(i) - static_cast<Int>(ts.size()/2);
                ts[i] = t;
            }
            mSRange = CRangeFactory(4).create();
            mXRange = URangeFactory<Int>(xs).create();
            mTRange = URangeFactory<Int>(ts).create();
            Vector<RangePtr> rs { mTRange, mXRange, mXRange, mXRange };
            mGRange = YRangeFactory(rs).create();
            RangePtr g1 = CRangeFactory(1).create();
            RangePtr g2 = CRangeFactory(2).create();
            Vector<RangePtr> gs { g2, g1, g1, g2 };
            mGeom = YRangeFactory(gs).create();
            mRRange = rrange(mGRange, mGeom);
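            // mGRange is the global 4-d grid (t,x,y,z) = (16,12,12,12); the
            // rank geometry (2,1,1,2) splits it over 2*2 = 4 ranks, so
            // mRRange pairs a rank position with a rank-local grid position.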
        }

        RangePtr mSRange;
        RangePtr mXRange;
        RangePtr mTRange;
        RangePtr mGRange;
        RangePtr mGeom;
        RangePtr mRRange;
    };
}

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    Env env;
    const SizeT myrank = getRankNumber();
    const SizeT Nranks = getNumRanks();

    typedef UIndex<Int> UI;
    typedef MIndex<UI,UI,UI,UI> LocI;
    typedef MIndex<CIndex,CIndex,CIndex,CIndex> RankI;
    RIndex<LocI,RankI> rgi(env.mRRange);
    RIndex<LocI,RankI> rgj(env.mRRange);
    LocI gi(env.mGRange);
    LocI gj(env.mGRange);
    RankI ri(env.mGeom);
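    // rgi/rgj run over the full distributed range: .rank() yields the owning
    // rank, .local() the position within that rank; gi/gj are the matching
    // indices on the plain global grid.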
    constexpr auto C0 = CSizeT<0> {};
    constexpr auto C2 = CSizeT<2> {};
    constexpr auto C3 = CSizeT<3> {};

    const SizeT LSize = env.mRRange->sub(1)->size();
    const SizeT blocks = env.mSRange->size();
    Vector<Double> data(LSize*blocks);
    Vector<Double> buf;
    Vector<Double*> map(env.mRRange->size(),nullptr);
    for(SizeT i = 0; i != data.size(); ++i){
        data[i] = static_cast<Double>(LSize*myrank*blocks+i);
    }
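    // Each rank fills its local data with globally unique values
    // (LSize*myrank*blocks + i), so the check below can recover the origin
    // rank of every received block from the value itself.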
    Vector<Vector<SizeT>> cnt(Nranks);
    for(auto& c: cnt){
        c.resize(Nranks);
    }
    Vector<Vector<Double>> sendbuf(Nranks);
    for(auto& sb: sendbuf){
        sb.reserve(data.size());
    }
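    // Access pattern under study: every global point (t,x,y,z) reads the
    // periodically shifted point (t+1,x,y+1,z+1), i.e. components 0, 2 and 3
    // of gj are incremented modulo their extents in the loops below.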
    // First loop: setup send buffer
    for(rgi = 0, gi = 0; rgi.lex() != rgi.lmax().val(); ++rgi, ++gi){
        gj = gi.lex();
        *gj.pack()[C0] = (gj.pack()[C0]->lex() + 1) % gj.pack()[C0]->lmax().val();
        *gj.pack()[C2] = (gj.pack()[C2]->lex() + 1) % gj.pack()[C2]->lmax().val();
        *gj.pack()[C3] = (gj.pack()[C3]->lex() + 1) % gj.pack()[C3]->lmax().val();
        gj();
        rgj = gj.lex();
        if(rgi.rank() != rgj.rank()){
            if(rgj.rank() == myrank){
                // j = sender
                const Double* d = data.data()+rgj.local()->pos()*blocks;
                assert(static_cast<Int>(*d) % 4 == 0);
                sendbuf[rgi.rank()].insert(sendbuf[rgi.rank()].end(), d, d+blocks);
            }
            ++cnt[rgi.rank()][rgj.rank()];
        }
    }
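    // cnt[r][s] now counts the blocks rank r must receive from rank s. Every
    // rank executes the identical global loop, so all ranks agree on these
    // counts and on the buffer offsets derived from them below.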
    // Initialize target buffer:
    SizeT bufsize = 0;
    for(const auto& c: cnt[myrank]){
        bufsize += c*blocks;
    }
    buf.resize(bufsize);

    // Transfer data:
    for(SizeT o = 0; o != Nranks; ++o){
        const SizeT dstr = (myrank + o) % Nranks;
        const SizeT srcr = (myrank - o + Nranks) % Nranks;
        SizeT off = 0;
        for(SizeT p = 0; p != srcr; ++p){
            off += cnt[myrank][p];
        }
        MPI_Status stat;
        MPI_Sendrecv(sendbuf[dstr].data(), cnt[dstr][myrank]*blocks, MPI_DOUBLE, dstr, 0,
                     buf.data()+off*blocks, cnt[myrank][srcr]*blocks, MPI_DOUBLE, srcr, 0,
                     MPI_COMM_WORLD, &stat);
    }
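    // Ring schedule: in round o every rank sends to (myrank+o)%Nranks and
    // receives from (myrank-o+Nranks)%Nranks, so each send is matched by a
    // receive posted in the same round and no rank blocks; e.g. in round 1,
    // rank 0 sends to rank 1 and receives from rank 3. Round 0 is a
    // zero-count self-exchange, since cnt[r][r] stays 0.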
    Vector<Vector<SizeT>> ext = cnt;
    // Second loop: Assign map to target buffer positions:
    for(auto& c: cnt){
        c = Vector<SizeT>(Nranks,0);
    }
    for(rgi = 0, gi = 0; rgi.lex() != rgi.lmax().val(); ++rgi, ++gi){
        gj = gi.lex();
        *gj.pack()[C0] = (gj.pack()[C0]->lex() + 1) % gj.pack()[C0]->lmax().val();
        *gj.pack()[C2] = (gj.pack()[C2]->lex() + 1) % gj.pack()[C2]->lmax().val();
        *gj.pack()[C3] = (gj.pack()[C3]->lex() + 1) % gj.pack()[C3]->lmax().val();
        gj();
        rgj = gj.lex();
        if(rgi.rank() == myrank){
            if(rgj.rank() != myrank){
                SizeT off = 0;
                for(SizeT p = 0; p != rgj.rank(); ++p){
                    off += ext[myrank][p];
                }
                map[rgj.pos()] = buf.data()+off*blocks + cnt[rgi.rank()][rgj.rank()]*blocks;
                ++cnt[rgi.rank()][rgj.rank()];
            }
            map[rgi.pos()] = data.data() + rgi.local()->pos()*blocks;
        }
    }
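    // map now resolves every global position this rank accesses: locally
    // owned positions point into data, remote ones into the segment of buf
    // received from the owning rank. ext keeps the per-source totals for the
    // segment offsets, while cnt is reused as a running index within each
    // segment.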
    // Third loop: Check:
    for(rgi = 0, gi = 0; rgi.lex() != rgi.lmax().val(); ++rgi, ++gi){
        gj = gi.lex();
        *gj.pack()[C0] = (gj.pack()[C0]->lex() + 1) % gj.pack()[C0]->lmax().val();
        *gj.pack()[C2] = (gj.pack()[C2]->lex() + 1) % gj.pack()[C2]->lmax().val();
        *gj.pack()[C3] = (gj.pack()[C3]->lex() + 1) % gj.pack()[C3]->lmax().val();
        gj();
        rgj = gj.lex();
        if(rgi.rank() == myrank){
            assert(map.data()[rgj.pos()] != nullptr);
            const Double vn = *map[rgj.pos()]/blocks;
            const SizeT xp = static_cast<SizeT>(vn);
            const SizeT orank = xp / env.mRRange->sub(1)->size();
            assert(env.mRRange->sub(1)->size() == 16*12*12*12/4);
            if(myrank == 0){
                std::cout << " pos = " << rgj.pos() << " , val = " << *map[rgj.pos()]
                          << " , val_norm = " << vn << " , origin rank = "
                          << orank << std::endl;
            }
            assert(orank == rgj.rank());
            assert(vn == rgj.pos());
        }
    }
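    // Since each block starts with the value (rank*LSize + local)*blocks, the
    // normalized value vn = value/blocks equals the global position
    // rank*LSize + local, so it must reproduce both rgj.pos() and the owning
    // rank.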
    MPI_Finalize();
    return 0;
}