complete WIPs from previous commit

Christian Zimmermann 2024-04-15 23:03:55 +02:00
parent adfb0fda67
commit a037598775
14 changed files with 427 additions and 440 deletions

@@ -17,39 +17,53 @@
namespace CNORXZ
{
template <class TarIndex, class SrcIndex, class F>
static void setupMap(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f, const Sptr<Vector<SizeT>>& m)
void
MapSetup<TarIndex,SrcIndex,F>::setup(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f, const Sptr<Vector<SizeT>>& m)
{
auto six = *si;
auto sie = si->range()->end();
auto tix = *ti;
for(six = 0; six != sie; ++six){
tix.at( f(*six) );
if(six.rank() == getRankNumber()){
(*m)[six.local()->lex()] = tix.pos();
}
(*m)[six->lex()] = tix.pos();
}
}
template <class TarIndex, class SrcIndex, class F>
static Sptr<Vector<SizeT>> setupMap(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f)
Sptr<Vector<SizeT>>
MapSetup<TarIndex,SrcIndex,F>::setup(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f)
{
auto o = std::make_shared<Vector<SizeT>>(si->local()->lmax().val());
auto o = std::make_shared<Vector<SizeT>>(si->lmax().val());
setupMap(ti,si,f,o);
return o;
}
template <class TarIndex, class SrcIndex, class F>
void setupMap(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f, const Sptr<Vector<SizeT>>& m)
{
MapSetup<TarIndex,SrcIndex,F>::setup(ti,si,f,m);
}
template <class TarIndex, class SrcIndex, class F>
Sptr<Vector<SizeT>> setupMap(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f)
{
return MapSetup<TarIndex,SrcIndex,F>::setup(ti,si,f);
}
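For orientation, a minimal usage sketch of these free wrappers; the URange/UIndex setup and the identity functor are assumptions for illustration, the actual call appears in setbuf_unit_test.cc further below:
RangePtr xr = URangeFactory<Int>( Vector<Int> { 0, 1, 2, 3 } ).create(); // assumed 1d range
auto ti = std::make_shared<UIndex<Int>>(xr); // target index (assumed index type)
auto si = std::make_shared<UIndex<Int>>(xr); // source index (assumed index type)
auto f = [](Int x) { return x; }; // maps a source meta value to a target meta value
Sptr<Vector<SizeT>> m = setupMap(ti, si, f); // forwards to MapSetup<TarIndex,SrcIndex,F>::setup
// (*m) now holds, for every source lex position, the flat target position tix.pos()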
template <class TarIndex, class SrcIndex, class Xpr>
template <class F>
MapXpr<TarIndex,SrcIndex,Xpr>::MapXpr(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f, Xpr&& xpr) :
mTi(ti), mSi(si),
mMap(std::make_shared<Vector<SizeT>>(mSi->local()->lmax().val())),
mMap(nullptr),
mXpr(std::forward<Xpr>(xpr)),
mExt(mkFPos( mXpr.rootSteps(mTi->id()), mMap->data() ))
{
setupMap(ti,si,f,mMap);
mMap = setupMap(ti,si,f);
}
template <class TarIndex, class SrcIndex, class Xpr>

@@ -16,14 +16,25 @@
namespace CNORXZ
{
template <class TarIndex, class SrcIndex, class F>
static void setupMap(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f, const Sptr<Vector<SizeT>>& m);
template <class TarIndex, class SrcIndex, class F>
static Sptr<Vector<SizeT>> setupMap(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f);
struct MapSetup
{
static void setup(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f, const Sptr<Vector<SizeT>>& m);
static Sptr<Vector<SizeT>> setup(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f);
};
template <class TarIndex, class SrcIndex, class F>
void setupMap(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f, const Sptr<Vector<SizeT>>& m);
template <class TarIndex, class SrcIndex, class F>
Sptr<Vector<SizeT>> setupMap(const Sptr<TarIndex>& ti, const Sptr<SrcIndex>& si,
const F& f);
template <class TarIndex, class SrcIndex, class Xpr>
class MapXpr : public XprInterface<MapXpr<TarIndex,SrcIndex,Xpr>>
{

@@ -14,3 +14,4 @@
#include "rrange.cc.h"
#include "raindex.cc.h"
#include "rarray.cc.h"
#include "rmap_xpr.cc.h"

@@ -16,5 +16,6 @@
#include "rarray.h"
#include "rop_types.h"
#include "typemap.h"
#include "rmap_xpr.h"
#include "cnorxz_mpi.cc.h"

@@ -225,6 +225,7 @@ namespace CNORXZ
void RCArray<T>::load(const Sptr<Index1>& lpi, const Sptr<Index2>& ai,
const Sptr<Vector<SizeT>>& imap) const
{
// TODO: use setupBuffer from the test!!!
// TODO: blocks!!!
const SizeT blocks = 0; assert(0); // TODO!!!

@@ -0,0 +1,49 @@
// -*- C++ -*-
/**
@file opt/include/rmap_xpr.cc.h
@brief MPI specific specializations for MapXpr
Copyright (c) 2024 Christian Zimmermann. All rights reserved.
Mail: chizeta@f3l.de
**/
#ifndef __cxz_mpi_rmap_xpr_cc_h__
#define __cxz_mpi_rmap_xpr_cc_h__
#include "rmap_xpr.h"
namespace CNORXZ
{
template <class TarIndex, class SrcI, class RSrcI, class F>
void
MapSetup<TarIndex,mpi::RIndex<SrcI,RSrcI>,F>::setup(const Sptr<TarIndex>& ti,
const Sptr<mpi::RIndex<SrcI,RSrcI>>& si,
const F& f, const Sptr<Vector<SizeT>>& m)
{
auto six = *si;
auto sie = si->range()->end();
auto tix = *ti;
for(six = 0; six != sie; ++six){
tix.at( f(*six) );
if(six.rank() == mpi::getRankNumber()){
(*m)[six.local()->lex()] = tix.pos();
}
}
}
template <class TarIndex, class SrcI, class RSrcI, class F>
Sptr<Vector<SizeT>>
MapSetup<TarIndex,mpi::RIndex<SrcI,RSrcI>,F>::setup(const Sptr<TarIndex>& ti,
const Sptr<mpi::RIndex<SrcI,RSrcI>>& si,
const F& f)
{
auto o = std::make_shared<Vector<SizeT>>(si->local()->lmax().val());
setup(ti,si,f,o);
return o;
}
}
#endif
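With a rank-distributed source index, the free setupMap wrapper picks this specialization automatically; a sketch using the names set up in setbuf_unit_test.cc below (LocI, RankI, mRRange and shift are defined there):
auto rgi = std::make_shared<RIndex<LocI,RankI>>(mRRange); // source: rank-distributed index
auto rgj = std::make_shared<RIndex<LocI,RankI>>(mRRange); // target: rank-distributed index
const Sptr<Vector<SizeT>> fmap = setupMap(rgj, rgi, shift); // dispatches to MapSetup<TarIndex,mpi::RIndex<SrcI,RSrcI>,F>
// fmap is sized by rgi->local()->lmax() and only filled for source positions
// owned by the calling rank (six.rank() == getRankNumber())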

@@ -0,0 +1,32 @@
// -*- C++ -*-
/**
@file opt/include/rmap_xpr.h
@brief MPI specific specializations for MapXpr
Copyright (c) 2024 Christian Zimmermann. All rights reserved.
Mail: chizeta@f3l.de
**/
#ifndef __cxz_mpi_rmap_xpr_h__
#define __cxz_mpi_rmap_xpr_h__
#include "xpr/map_xpr.h"
#include "rrange.h"
namespace CNORXZ
{
template <class TarIndex, class SrcI, class RSrcI, class F>
struct MapSetup<TarIndex,mpi::RIndex<SrcI,RSrcI>,F>
{
static void setup(const Sptr<TarIndex>& ti, const Sptr<mpi::RIndex<SrcI,RSrcI>>& si,
const F& f, const Sptr<Vector<SizeT>>& m);
static Sptr<Vector<SizeT>> setup(const Sptr<TarIndex>& ti,
const Sptr<mpi::RIndex<SrcI,RSrcI>>& si,
const F& f);
};
}
#endif

@@ -12,6 +12,8 @@
#ifndef __cxz_mpi_rop_types_h__
#define __cxz_mpi_rop_types_h__
#include "operation/operation.h"
namespace CNORXZ
{
namespace mpi

@@ -14,6 +14,8 @@ target_link_libraries(mpirautest ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INI
set(MPI_TEST_COMMAND mpirun -n 4 mpirautest)
add_test(NAME mpirautest COMMAND ${MPI_TEST_COMMAND})
add_executable(riexp rindex_exp.cc)
add_dependencies(riexp cnorxz cnorxzmpi test_lib)
target_link_libraries(riexp ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${MPI_LIBS} cnorxz cnorxzmpi test_lib)
add_executable(mpisbutest setbuf_unit_test.cc)
add_dependencies(mpisbutest cnorxz cnorxzmpi test_lib)
target_link_libraries(mpisbutest ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT} ${MPI_LIBS} cnorxz cnorxzmpi test_lib)
set(MPI_TEST_COMMAND mpirun -n 4 mpisbutest)
add_test(NAME mpisbutest COMMAND ${MPI_TEST_COMMAND})

@@ -0,0 +1,47 @@
// -*- C++ -*-
/**
@file opt/mpi/tests/mpi_env.h
@brief MPI Environment.
Copyright (c) 2024 Christian Zimmermann. All rights reserved.
Mail: chizeta@f3l.de
**/
#ifndef __cxz_mpi_mpi_env_h__
#define __cxz_mpi_mpi_env_h__
#include <cstdlib>
#include <mpi.h> // for MPI_Init/MPI_Finalize used below
#include "gtest/gtest.h"
namespace CNORXZ
{
namespace mpi
{
class MPIEnv : public ::testing::Environment
{
public:
MPIEnv(int argc, char** argv) : mArgc(argc), mArgv(argv) {}
virtual ~MPIEnv() override {}
virtual void SetUp() override
{
MPI_Init(&mArgc, &mArgv);
}
virtual void TearDown() override
{
MPI_Finalize();
}
protected:
int mArgc;
char** mArgv;
};
}
}
#endif
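The environment is registered once per test binary, so MPI_Init/MPI_Finalize bracket all tests; a minimal main(), identical to the one used by setbuf_unit_test.cc at the end of this commit:
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
// MPIEnv::SetUp() calls MPI_Init, TearDown() calls MPI_Finalize,
// so the tests themselves no longer manage the MPI lifetime.
::testing::AddGlobalTestEnvironment( new MPIEnv(argc, argv) );
return RUN_ALL_TESTS();
}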

@@ -19,6 +19,7 @@
#include "test_numbers.h"
#include "rrange.cc.h"
#include "rarray.cc.h"
#include "mpi_env.h"
namespace
{
@@ -26,30 +27,6 @@ namespace
using Test::Numbers;
using namespace CNORXZ::mpi;
class MPIEnv : public ::testing::Environment
{
public:
MPIEnv(int argc, char** argv) : mArgc(argc), mArgv(argv) {}
virtual ~MPIEnv() override {}
virtual void SetUp() override
{
MPI_Init(&mArgc, &mArgv);
}
virtual void TearDown() override
{
MPI_Finalize();
}
protected:
int mArgc;
char** mArgv;
};
class RCArray_Test : public ::testing::Test
{
protected:

@@ -1,375 +0,0 @@
// -*- C++ -*-
// experiment...
#include <cstdlib>
#include <iostream>
#include "cnorxz.h"
#include "cnorxz_mpi.h"
#include "test_numbers.h"
#include "rrange.cc.h"
namespace
{
using namespace CNORXZ;
using Test::Numbers;
using namespace CNORXZ::mpi;
class Env
{
public:
Env()
{
CXZ_ASSERT(getNumRanks() == 4, "expected 4 ranks");
Vector<Int> xs(L);
Vector<Int> ts(T);
for(SizeT i = 0; i != xs.size(); ++i){
const Int x = static_cast<Int>(i) - static_cast<Int>(xs.size()/2);
xs[i] = x;
}
for(SizeT i = 0; i != ts.size(); ++i){
const Int t = static_cast<Int>(i) - static_cast<Int>(ts.size()/2);
ts[i] = t;
}
mSRange = CRangeFactory(4).create();
mXRange = URangeFactory<Int>(xs).create();
mTRange = URangeFactory<Int>(ts).create();
Vector<RangePtr> rs { mTRange, mXRange, mXRange, mXRange };
mGRange = YRangeFactory(rs).create();
RangePtr g1 = CRangeFactory(1).create();
RangePtr g2 = CRangeFactory(2).create();
Vector<RangePtr> gs { g2, g1, g1, g2 };
mGeom = YRangeFactory(gs).create();
mRRange = rrange(mGRange, mGeom);
}
SizeT T = 16;
SizeT L = 12;
RangePtr mSRange;
RangePtr mXRange;
RangePtr mTRange;
RangePtr mGRange;
RangePtr mGeom;
RangePtr mRRange;
};
template <class TarIndex, class SrcIndex, typename T>
void setupBuffer(const Sptr<TarIndex>& rgj, const Sptr<SrcIndex>& rgi,
const Sptr<Vector<SizeT>>& fmap, const Vector<T>& data,
Vector<T>& buf, Vector<const T*>& map, const SizeT blocks)
{
const SizeT myrank = getRankNumber();
const SizeT Nranks = getNumRanks();
const SizeT mapsize = rgj->range()->size();
map = Vector<const T*>(mapsize,nullptr);
Vector<Vector<T>> sendbuf(Nranks);
for(auto& sb: sendbuf){
sb.reserve(data.size());
}
Vector<Vector<SizeT>> request(Nranks);
const SizeT locsz = rgi->local()->lmax().val();
// First loop: setup send buffer
rgi->ifor( mapXpr(rgj, rgi, fmap,
operation
( [&](SizeT p, SizeT q) {
const SizeT r = p / locsz;
if(myrank != r){
request[r].push_back(p % locsz);
}
} , posop(rgj), posop(rgi) ) ) ,
NoF {} )();
// transfer:
Vector<SizeT> reqsizes(Nranks);
SizeT bufsize = 0;
Vector<Vector<SizeT>> ext(Nranks);
for(auto& e: ext){
e.resize(Nranks);
}
for(SizeT i = 0; i != Nranks; ++i){
reqsizes[i] = request[i].size();
bufsize += reqsizes[i]*blocks;
ext[myrank][i] = reqsizes[i];
}
buf.resize(bufsize);
MPI_Status stat;
// transfer requests:
for(SizeT o = 1; o != Nranks; ++o){
const SizeT dstr = (myrank + o) % Nranks;
const SizeT srcr = (myrank - o + Nranks) % Nranks;
SizeT sendsize = 0;
MPI_Sendrecv(reqsizes.data()+dstr, 1, MPI_UNSIGNED_LONG, dstr, 0,
&sendsize, 1, MPI_UNSIGNED_LONG, srcr, 0, MPI_COMM_WORLD, &stat);
ext[srcr][myrank] = sendsize;
Vector<SizeT> sendpos(sendsize);
MPI_Sendrecv(request[dstr].data(), reqsizes[dstr], MPI_UNSIGNED_LONG, dstr, 0,
sendpos.data(), sendsize, MPI_UNSIGNED_LONG, srcr, 0, MPI_COMM_WORLD, &stat);
sendbuf[srcr].resize(sendsize*blocks);
for(SizeT i = 0; i != sendsize; ++i){
std::memcpy( sendbuf[srcr].data()+i*blocks, data.data()+sendpos[i]*blocks, blocks*sizeof(T) );
}
}
const MPI_Datatype dt = Typemap<T>::value();
// transfer data:
for(SizeT o = 1; o != Nranks; ++o){
const SizeT dstr = (myrank + o) % Nranks;
const SizeT srcr = (myrank - o + Nranks) % Nranks;
SizeT off = 0;
for(SizeT p = 0; p != srcr; ++p){
off += ext[myrank][p];
}
MPI_Sendrecv(sendbuf[dstr].data(), ext[dstr][myrank]*blocks, dt, dstr, 0,
buf.data()+off*blocks, ext[myrank][srcr]*blocks, dt, srcr, 0,
MPI_COMM_WORLD, &stat);
}
// Second loop: Assign map to target buffer positions:
Vector<SizeT> cnt(Nranks);
rgi->ifor( mapXpr(rgj, rgi, fmap,
operation
( [&](SizeT p, SizeT q) {
const SizeT r = p / locsz;
if(myrank != r){
SizeT off = 0;
for(SizeT s = 0; s != r; ++s){
off += ext[myrank][s];
}
map[p] = buf.data() + off*blocks + cnt[r]*blocks;
++cnt[r];
}
map[q + myrank*locsz] = data.data() + q*blocks;
} , posop(rgj), posop(rgi) ) ), NoF {} )();
}
}
void run2(const Env& env)
{
const SizeT myrank = getRankNumber();
//const SizeT Nranks = getNumRanks();
typedef UIndex<Int> UI;
typedef MIndex<UI,UI,UI,UI> LocI;
typedef MIndex<CIndex,CIndex,CIndex,CIndex> RankI;
const SizeT T = env.T;
const SizeT L = env.L;
auto rgi = std::make_shared<RIndex<LocI,RankI>>(env.mRRange);
auto rgj = std::make_shared<RIndex<LocI,RankI>>(env.mRRange);
auto rgk = std::make_shared<RIndex<LocI,RankI>>(env.mRRange);
LocI gi(env.mGRange);
LocI gj(env.mGRange);
auto ri = std::make_shared<RankI>(env.mGeom);
constexpr auto C0 = CSizeT<0> {};
//constexpr auto C1 = CSizeT<1> {};
constexpr auto C2 = CSizeT<2> {};
constexpr auto C3 = CSizeT<3> {};
const SizeT LSize = env.mRRange->sub(1)->size();
const SizeT blocks = env.mSRange->size();
Vector<Double> data(LSize*blocks);
for(SizeT i = 0; i != data.size(); ++i){
data[i] = static_cast<Double>(LSize*myrank*blocks+i);
}
*rgj = 0;
while(rgj->rank() != 1){
++*rgj;
}
*rgj->local() = 0;
Vector<Double> buf;
Vector<const Double*> map(env.mRRange->size(),nullptr);
auto shift = [&](const auto& x){
auto o = x;
std::get<0>(o) += 1;
if(std::get<0>(o) >= static_cast<int>(T)/2) { std::get<0>(o) -= T; }
std::get<2>(o) += 1;
if(std::get<2>(o) >= static_cast<int>(L)/2) { std::get<2>(o) -= L; }
std::get<3>(o) += 1;
if(std::get<3>(o) >= static_cast<int>(L)/2) { std::get<3>(o) -= L; }
return o;
};
const Sptr<Vector<SizeT>> fmap = setupMap(rgj, rgi, shift);
setupBuffer(rgj, rgi, fmap, data, buf, map, env.mSRange->size());
// Third loop: Check:
for(*rgi = 0, gi = 0; rgi->lex() != rgi->lmax().val(); ++*rgi, ++gi){
gj = gi.lex();
*gj.pack()[C0] = (gj.pack()[C0]->lex() + 1) % gj.pack()[C0]->lmax().val();
*gj.pack()[C2] = (gj.pack()[C2]->lex() + 1) % gj.pack()[C2]->lmax().val();
*gj.pack()[C3] = (gj.pack()[C3]->lex() + 1) % gj.pack()[C3]->lmax().val();
gj();
*rgj = gj.lex();
if(rgi->rank() == myrank){
assert(map.data()[rgj->pos()] != nullptr);
const Double vn = *map[rgj->pos()]/blocks;
const SizeT xp = static_cast<SizeT>(vn);
const SizeT orank = xp / env.mRRange->sub(1)->size();
assert(env.mRRange->sub(1)->size() == 16*12*12*12/4);
if(myrank == 0){
std::cout << " pos = " << rgj->pos() << " , val = " << *map[rgj->pos()]
<< " , val_norm = " << vn << " , origin rank = "
<< orank << std::endl;
}
assert(orank == rgj->rank());
assert(vn == rgj->pos());
}
}
CHECK;
MPI_Barrier(MPI_COMM_WORLD);
}
void run1(const Env& env)
{
const SizeT myrank = getRankNumber();
const SizeT Nranks = getNumRanks();
typedef UIndex<Int> UI;
typedef MIndex<UI,UI,UI,UI> LocI;
typedef MIndex<CIndex,CIndex,CIndex,CIndex> RankI;
RIndex<LocI,RankI> rgi(env.mRRange);
RIndex<LocI,RankI> rgj(env.mRRange);
LocI gi(env.mGRange);
LocI gj(env.mGRange);
RankI ri(env.mGeom);
constexpr auto C0 = CSizeT<0> {};
constexpr auto C2 = CSizeT<2> {};
constexpr auto C3 = CSizeT<3> {};
const SizeT LSize = env.mRRange->sub(1)->size();
const SizeT blocks = env.mSRange->size();
Vector<Double> data(LSize*blocks);
Vector<Double> buf;
Vector<Double*> map(env.mRRange->size(),nullptr);
for(SizeT i = 0; i != data.size(); ++i){
data[i] = static_cast<Double>(LSize*myrank*blocks+i);
}
Vector<Vector<SizeT>> cnt(Nranks);
for(auto& c: cnt){
c.resize(Nranks);
}
Vector<Vector<Double>> sendbuf(Nranks);
for(auto& sb: sendbuf){
sb.reserve(data.size());
}
// First loop: setup send buffer
for(rgi = 0, gi = 0; rgi.lex() != rgi.lmax().val(); ++rgi, ++gi){
gj = gi.lex();
*gj.pack()[C0] = (gj.pack()[C0]->lex() + 1) % gj.pack()[C0]->lmax().val();
*gj.pack()[C2] = (gj.pack()[C2]->lex() + 1) % gj.pack()[C2]->lmax().val();
*gj.pack()[C3] = (gj.pack()[C3]->lex() + 1) % gj.pack()[C3]->lmax().val();
gj();
rgj = gj.lex();
if(rgi.rank() != rgj.rank()){
if(rgj.rank() == myrank){
// j = sender
const Double* d = data.data()+rgj.local()->pos()*blocks;
assert(static_cast<Int>(*d) % 4 == 0);
sendbuf[rgi.rank()].insert(sendbuf[rgi.rank()].end(), d, d+blocks);
}
++cnt[rgi.rank()][rgj.rank()];
}
}
// Initialize target buffer:
SizeT bufsize = 0;
for(const auto& c: cnt[myrank]){
bufsize += c*blocks;
}
buf.resize(bufsize);
// Transfer data:
for(SizeT o = 0; o != Nranks; ++o){
const SizeT dstr = (myrank + o) % Nranks;
const SizeT srcr = (myrank - o + Nranks) % Nranks;
SizeT off = 0;
for(SizeT p = 0; p != srcr; ++p){
off += cnt[myrank][p];
}
MPI_Status stat;
MPI_Sendrecv(sendbuf[dstr].data(), cnt[dstr][myrank]*blocks, MPI_DOUBLE, dstr, 0,
buf.data()+off*blocks, cnt[myrank][srcr]*blocks, MPI_DOUBLE, srcr, 0,
MPI_COMM_WORLD, &stat);
}
Vector<Vector<SizeT>> ext = cnt;
// Second loop: Assign map to target buffer positions:
for(auto& c: cnt){
c = Vector<SizeT>(Nranks,0);
}
for(rgi = 0, gi = 0; rgi.lex() != rgi.lmax().val(); ++rgi, ++gi){
gj = gi.lex();
*gj.pack()[C0] = (gj.pack()[C0]->lex() + 1) % gj.pack()[C0]->lmax().val();
*gj.pack()[C2] = (gj.pack()[C2]->lex() + 1) % gj.pack()[C2]->lmax().val();
*gj.pack()[C3] = (gj.pack()[C3]->lex() + 1) % gj.pack()[C3]->lmax().val();
gj();
rgj = gj.lex();
if(rgi.rank() == myrank){
if(rgj.rank() != myrank){
SizeT off = 0;
for(SizeT p = 0; p != rgj.rank(); ++p){
off += ext[myrank][p];
}
map[rgj.pos()] = buf.data()+off*blocks + cnt[rgi.rank()][rgj.rank()]*blocks;
++cnt[rgi.rank()][rgj.rank()];
}
map[rgi.pos()] = data.data() + rgi.local()->pos()*blocks;
}
}
// Third loop: Check:
for(rgi = 0, gi = 0; rgi.lex() != rgi.lmax().val(); ++rgi, ++gi){
gj = gi.lex();
*gj.pack()[C0] = (gj.pack()[C0]->lex() + 1) % gj.pack()[C0]->lmax().val();
*gj.pack()[C2] = (gj.pack()[C2]->lex() + 1) % gj.pack()[C2]->lmax().val();
*gj.pack()[C3] = (gj.pack()[C3]->lex() + 1) % gj.pack()[C3]->lmax().val();
gj();
rgj = gj.lex();
if(rgi.rank() == myrank){
assert(map.data()[rgj.pos()] != nullptr);
const Double vn = *map[rgj.pos()]/blocks;
const SizeT xp = static_cast<SizeT>(vn);
const SizeT orank = xp / env.mRRange->sub(1)->size();
assert(env.mRRange->sub(1)->size() == 16*12*12*12/4);
if(myrank == 0){
std::cout << " pos = " << rgj.pos() << " , val = " << *map[rgj.pos()]
<< " , val_norm = " << vn << " , origin rank = "
<< orank << std::endl;
}
assert(orank == rgj.rank());
assert(vn == rgj.pos());
}
}
MPI_Barrier(MPI_COMM_WORLD);
}
int main(int argc, char** argv)
{
MPI_Init(&argc, &argv);
Env env;
run1(env);
run2(env);
MPI_Finalize();
return 0;
}

@@ -18,35 +18,13 @@
#include "cnorxz_mpi.h"
#include "test_numbers.h"
#include "rrange.cc.h"
#include "mpi_env.h"
namespace
{
using namespace CNORXZ;
using Test::Numbers;
using namespace CNORXZ::mpi;
class MPIEnv : public ::testing::Environment
{
public:
MPIEnv(int argc, char** argv) : mArgc(argc), mArgv(argv) {}
virtual ~MPIEnv() override {}
virtual void SetUp() override
{
MPI_Init(&mArgc, &mArgv);
}
virtual void TearDown() override
{
MPI_Finalize();
}
protected:
int mArgc;
char** mArgv;
};
class RRange_Test : public ::testing::Test
{

@@ -0,0 +1,247 @@
// -*- C++ -*-
/**
@file opt/mpi/tests/setbuf_unit_test.cc
@brief Setbuf unit tests.
Copyright (c) 2024 Christian Zimmermann. All rights reserved.
Mail: chizeta@f3l.de
**/
#include <cstdlib>
#include <iostream>
#include "cnorxz.h"
#include "cnorxz_mpi.h"
#include "test_numbers.h"
#include "rrange.cc.h"
#include "mpi_env.h"
namespace
{
using namespace CNORXZ;
using Test::Numbers;
using namespace CNORXZ::mpi;
class Setbuf_Test : public ::testing::Test
{
protected:
Setbuf_Test()
{
CXZ_ASSERT(getNumRanks() == 4, "expected 4 ranks");
Vector<Int> xs(L);
Vector<Int> ts(T);
for(SizeT i = 0; i != xs.size(); ++i){
const Int x = static_cast<Int>(i) - static_cast<Int>(xs.size()/2);
xs[i] = x;
}
for(SizeT i = 0; i != ts.size(); ++i){
const Int t = static_cast<Int>(i) - static_cast<Int>(ts.size()/2);
ts[i] = t;
}
mSRange = CRangeFactory(4).create();
mXRange = URangeFactory<Int>(xs).create();
mTRange = URangeFactory<Int>(ts).create();
Vector<RangePtr> rs { mTRange, mXRange, mXRange, mXRange };
mGRange = YRangeFactory(rs).create();
RangePtr g1 = CRangeFactory(1).create();
RangePtr g2 = CRangeFactory(2).create();
Vector<RangePtr> gs { g2, g1, g1, g2 };
mGeom = YRangeFactory(gs).create();
mRRange = rrange(mGRange, mGeom);
}
SizeT T = 16;
SizeT L = 12;
RangePtr mSRange;
RangePtr mXRange;
RangePtr mTRange;
RangePtr mGRange;
RangePtr mGeom;
RangePtr mRRange;
};
template <class TarIndex, class SrcIndex, typename T>
void setupBuffer(const Sptr<TarIndex>& rgj, const Sptr<SrcIndex>& rgi,
const Sptr<Vector<SizeT>>& fmap, const Vector<T>& data,
Vector<T>& buf, Vector<const T*>& map, const SizeT blocks)
{
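// The buffer setup proceeds in three phases:
// 1) map loop: for every target position p owned by another rank r = p / locsz,
//    record the rank-local position p % locsz in request[r];
// 2) exchange: communicate request sizes and position lists via MPI_Sendrecv,
//    copy the requested blocks into per-rank send buffers and exchange the data;
// 3) map loop again: store, for every target position, a pointer into either the
//    local data array or the received buffer (map).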
const SizeT myrank = getRankNumber();
const SizeT Nranks = getNumRanks();
const SizeT mapsize = rgj->range()->size();
map = Vector<const T*>(mapsize,nullptr);
Vector<Vector<T>> sendbuf(Nranks);
for(auto& sb: sendbuf){
sb.reserve(data.size());
}
Vector<Vector<SizeT>> request(Nranks);
const SizeT locsz = rgi->local()->lmax().val();
// First loop: setup send buffer
rgi->ifor( mapXpr(rgj, rgi, fmap,
operation
( [&](SizeT p, SizeT q) {
const SizeT r = p / locsz;
if(myrank != r){
request[r].push_back(p % locsz);
}
} , posop(rgj), posop(rgi) ) ) ,
NoF {} )();
// transfer:
Vector<SizeT> reqsizes(Nranks);
SizeT bufsize = 0;
Vector<Vector<SizeT>> ext(Nranks);
for(auto& e: ext){
e.resize(Nranks);
}
for(SizeT i = 0; i != Nranks; ++i){
reqsizes[i] = request[i].size();
bufsize += reqsizes[i]*blocks;
ext[myrank][i] = reqsizes[i];
}
buf.resize(bufsize);
MPI_Status stat;
// transfer requests:
for(SizeT o = 1; o != Nranks; ++o){
const SizeT dstr = (myrank + o) % Nranks;
const SizeT srcr = (myrank - o + Nranks) % Nranks;
SizeT sendsize = 0;
MPI_Sendrecv(reqsizes.data()+dstr, 1, MPI_UNSIGNED_LONG, dstr, 0,
&sendsize, 1, MPI_UNSIGNED_LONG, srcr, 0, MPI_COMM_WORLD, &stat);
ext[srcr][myrank] = sendsize;
Vector<SizeT> sendpos(sendsize);
MPI_Sendrecv(request[dstr].data(), reqsizes[dstr], MPI_UNSIGNED_LONG, dstr, 0,
sendpos.data(), sendsize, MPI_UNSIGNED_LONG, srcr, 0, MPI_COMM_WORLD, &stat);
sendbuf[srcr].resize(sendsize*blocks);
for(SizeT i = 0; i != sendsize; ++i){
std::memcpy( sendbuf[srcr].data()+i*blocks, data.data()+sendpos[i]*blocks, blocks*sizeof(T) );
}
}
const MPI_Datatype dt = Typemap<T>::value();
// transfer data:
for(SizeT o = 1; o != Nranks; ++o){
const SizeT dstr = (myrank + o) % Nranks;
const SizeT srcr = (myrank - o + Nranks) % Nranks;
SizeT off = 0;
for(SizeT p = 0; p != srcr; ++p){
off += ext[myrank][p];
}
MPI_Sendrecv(sendbuf[dstr].data(), ext[dstr][myrank]*blocks, dt, dstr, 0,
buf.data()+off*blocks, ext[myrank][srcr]*blocks, dt, srcr, 0,
MPI_COMM_WORLD, &stat);
}
// Second loop: Assign map to target buffer positions:
Vector<SizeT> cnt(Nranks);
rgi->ifor( mapXpr(rgj, rgi, fmap,
operation
( [&](SizeT p, SizeT q) {
const SizeT r = p / locsz;
if(myrank != r){
SizeT off = 0;
for(SizeT s = 0; s != r; ++s){
off += ext[myrank][s];
}
map[p] = buf.data() + off*blocks + cnt[r]*blocks;
++cnt[r];
}
map[q + myrank*locsz] = data.data() + q*blocks;
} , posop(rgj), posop(rgi) ) ), NoF {} )();
}
TEST_F(Setbuf_Test, run)
{
const SizeT myrank = getRankNumber();
//const SizeT Nranks = getNumRanks();
typedef UIndex<Int> UI;
typedef MIndex<UI,UI,UI,UI> LocI;
typedef MIndex<CIndex,CIndex,CIndex,CIndex> RankI;
auto rgi = std::make_shared<RIndex<LocI,RankI>>(mRRange);
auto rgj = std::make_shared<RIndex<LocI,RankI>>(mRRange);
auto rgk = std::make_shared<RIndex<LocI,RankI>>(mRRange);
LocI gi(mGRange);
LocI gj(mGRange);
auto ri = std::make_shared<RankI>(mGeom);
constexpr auto C0 = CSizeT<0> {};
//constexpr auto C1 = CSizeT<1> {};
constexpr auto C2 = CSizeT<2> {};
constexpr auto C3 = CSizeT<3> {};
const SizeT LSize = mRRange->sub(1)->size();
const SizeT blocks = mSRange->size();
Vector<Double> data(LSize*blocks);
for(SizeT i = 0; i != data.size(); ++i){
data[i] = static_cast<Double>(LSize*myrank*blocks+i);
}
*rgj = 0;
while(rgj->rank() != 1){
++*rgj;
}
*rgj->local() = 0;
Vector<Double> buf;
Vector<const Double*> map(mRRange->size(),nullptr);
auto shift = [&](const auto& x){
auto o = x;
std::get<0>(o) += 1;
if(std::get<0>(o) >= static_cast<int>(T)/2) { std::get<0>(o) -= T; }
std::get<2>(o) += 1;
if(std::get<2>(o) >= static_cast<int>(L)/2) { std::get<2>(o) -= L; }
std::get<3>(o) += 1;
if(std::get<3>(o) >= static_cast<int>(L)/2) { std::get<3>(o) -= L; }
return o;
};
const Sptr<Vector<SizeT>> fmap = setupMap(rgj, rgi, shift);
setupBuffer(rgj, rgi, fmap, data, buf, map, mSRange->size());
EXPECT_EQ(mRRange->sub(1)->size(), 16*12*12*12/4);
// Third loop: Check:
for(*rgi = 0, gi = 0; rgi->lex() != rgi->lmax().val(); ++*rgi, ++gi){
gj = gi.lex();
*gj.pack()[C0] = (gj.pack()[C0]->lex() + 1) % gj.pack()[C0]->lmax().val();
*gj.pack()[C2] = (gj.pack()[C2]->lex() + 1) % gj.pack()[C2]->lmax().val();
*gj.pack()[C3] = (gj.pack()[C3]->lex() + 1) % gj.pack()[C3]->lmax().val();
gj();
*rgj = gj.lex();
if(rgi->rank() == myrank){
EXPECT_TRUE(map.data()[rgj->pos()] != nullptr);
const Double vn = *map[rgj->pos()]/blocks;
const SizeT xp = static_cast<SizeT>(vn);
const SizeT orank = xp / mRRange->sub(1)->size();
if(myrank == 0){
std::cout << " pos = " << rgj->pos() << " , val = " << *map[rgj->pos()]
<< " , val_norm = " << vn << " , origin rank = "
<< orank << std::endl;
}
EXPECT_EQ(orank, rgj->rank());
EXPECT_EQ(vn, rgj->pos());
}
}
MPI_Barrier(MPI_COMM_WORLD);
}
}
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);
::testing::AddGlobalTestEnvironment( new MPIEnv(argc, argv) );
return RUN_ALL_TESTS();
}