hdf5 + hdf5-mpi: generalize read functions (WIP)
This commit is contained in:
parent ca207d4013
commit f99562056d
6 changed files with 126 additions and 81 deletions
src/opt
hdf5
hdf5_mpi
@@ -48,14 +48,20 @@ namespace CNORXZ
    template <class I, typename M>
    MArray<T> SDataset<T>::read(const IndexInterface<I,M>& idx) const
    {
        // DEPRECATED!!!
        CXZ_ASSERT(idx.lex() == 0, "only full read supported");
        CXZ_ASSERT(idx.dim() == mFileRange->dim(), "got index of inconsistent dimension, got"
                   << idx.dim() << ", expected " << mFileRange->dim());
        const RangePtr outrange = idx.range();
        CXZ_ASSERT(outrange->size() == mFileRange->size(),
                   "got index of range of inconsistent size, expected "
                   << mFileRange->size() << ", got " << outrange->size());
        const I mbeg(outrange);
        I mend(outrange);
        mend = mend.lmax().val()-1;
        MArray<T> out(outrange);
        readbase(out.data(), outrange, nullptr);
        //readbase(out.data(), outrange, nullptr);
        readbase(out.data(), sizeof(T), toYptr(mbeg), toYptr(mend), toYptr(idx));
        return out;
    }
@@ -66,8 +72,13 @@ namespace CNORXZ
        CXZ_ASSERT(beg.dim() == mFileRange->dim(), "got index of inconsistent dimension, got"
                   << beg.dim() << ", expected " << mFileRange->dim());
        const RangePtr outrange = beg.prange(end);
        const I mbeg(outrange);
        I mend(outrange);
        mend = mend.lmax().val()-1;
        MArray<T> out(outrange);
        readbase(out.data(), outrange, toYptr(beg));
        //readbase(out.data(), outrange, toYptr(beg));
        //readbase(out.data(), toYptr(beg), toYptr(end), toYptr(beg));
        readbase(out.data(), sizeof(T), toYptr(mbeg), toYptr(mend), toYptr(beg));
        return out;
    }
@@ -70,6 +70,17 @@ namespace CNORXZ
           @param beg Position within the file space.
         */
        virtual void readbase(void* dest, RangePtr readrange, Sptr<YIndex> beg) const;

        /** Read the dataset.
            @param dest Pointer to destination.
            @param tsize Size of the destination data type.
            @param mbeg Index pointing to the begin of the read destination.
            @param mend Index pointing to the (inclusive) end of the read destination.
            @param fbeg Position within the file space.
         */
        virtual void readbase(void* dest, SizeT tsize, Sptr<YIndex> mbeg, Sptr<YIndex> mend, Sptr<YIndex> fbeg) const;

        virtual hid_t mkdxpl() const;

        /** Initialize the dataset.
            @param data Array containing the dataset.
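The generalized readbase() documented above maps the (mbeg, mend) destination index pair to an HDF5 hyperslab selection on the file space and shifts the destination pointer by tsize times the begin position. Below is a minimal sketch of that pattern in the plain HDF5 C API, independent of the CNORXZ wrapper classes; dset, mtype, fspace, ndims, offset, dims, elem_size and dest_pos are illustrative placeholders, not names taken from this commit.

    // Hedged sketch: hyperslab read with a pointer-offset destination, mirroring readbase().
    #include <hdf5.h>

    herr_t read_block(hid_t dset, hid_t mtype, hid_t fspace, int ndims,
                      const hsize_t* offset, const hsize_t* dims,
                      void* dest, size_t elem_size, size_t dest_pos)
    {
        // Select the requested block inside the file dataspace.
        H5Sselect_hyperslab(fspace, H5S_SELECT_SET, offset, NULL, dims, NULL);
        // The memory dataspace has exactly the shape of the selected block.
        const hid_t mspace = H5Screate_simple(ndims, dims, NULL);
        // Default transfer property list; an MPI-aware subclass would return a collective one here.
        const hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
        // Destination offset applied by pointer arithmetic, as in readbase().
        char* truedest = static_cast<char*>(dest) + elem_size * dest_pos;
        const herr_t err = H5Dread(dset, mtype, mspace, fspace, dxpl, truedest);
        H5Pclose(dxpl);
        H5Sclose(mspace);
        return err;
    }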
@@ -130,7 +130,7 @@ namespace CNORXZ
            H5Sselect_hyperslab(mFilespace, H5S_SELECT_SET, fpos.data(), NULL, dims.data(), NULL);
        }
        const hid_t memspace = H5Screate_simple(dims.size(), dims.data(), NULL);
        const hid_t xfer_plist_id = H5Pcreate(H5P_DATASET_XFER);
        const hid_t xfer_plist_id = this->mkdxpl();
        H5Dwrite(mId, mType, memspace, mFilespace, xfer_plist_id, data);
        H5Pclose(xfer_plist_id);
        H5Sclose(memspace);
@@ -161,8 +161,7 @@ namespace CNORXZ
        }
        const hid_t mem_space_id = H5Screate_simple(static_cast<hsize_t>(dims.size()),
                                                    dims.data(), nullptr);
        const hid_t xfer_plist_id = H5Pcreate(H5P_DATASET_XFER);
        //MArray<T> out(readRange);
        const hid_t xfer_plist_id = this->mkdxpl();
        const herr_t err = H5Dread(mId, mType, mem_space_id, mFilespace, xfer_plist_id, dest);
        CXZ_ASSERT(err >= 0, "error while reading dataset '" << mName
                   << "', errorcode :" << err);
@@ -170,6 +169,59 @@ namespace CNORXZ
        H5Sclose(mem_space_id);
    }

    void Dataset::readbase(void* dest, SizeT tsize, Sptr<YIndex> mbeg, Sptr<YIndex> mend,
                           Sptr<YIndex> fbeg) const
    {
        const SizeT D = mFileRange->dim();
        const bool todo = mbeg != nullptr;
        Vector<hsize_t> offset(D);
        Vector<hsize_t> dims(D);
        if(todo){
            //RangePtr dr = mbeg->range();
            //const bool parallel = true;
            CXZ_ASSERT(mend != nullptr, "no end edge given");
            CXZ_ASSERT(fbeg->range()->dim() == D, "dimension of file index ("
                       << fbeg->range()->dim() << ") different from dimension of file range ("
                       << D << ")");
            CXZ_ASSERT(mbeg->range()->dim() == D, "dimension of data index ("
                       << mbeg->range()->dim() << ") different from dimension of file range ("
                       << D << ")");
            CXZ_ASSERT(mend->range()->dim() == D, "dimension of data index ("
                       << mend->range()->dim() << ") different from dimension of file range ("
                       << D << ")");

            for(SizeT i = 0; i != D; ++i){
                const SizeT bi = mbeg->pack().get(i)->lex();
                const SizeT ei = mend->pack().get(i)->lex();
                const SizeT fbegi = fbeg->pack().get(i)->lex();
                dims[i] = ei - bi + 1; // inclusive!
                offset[i] = fbegi;
            }
        }
        else {
            for(SizeT i = 0; i != D; ++i){
                dims[i] = 0;
                offset[i] = 0;
            }
        }
        H5Sselect_hyperslab(mFilespace, H5S_SELECT_SET, offset.data(), NULL, dims.data(), NULL);
        const hid_t mem_space_id = H5Screate_simple(static_cast<hsize_t>(dims.size()),
                                                    dims.data(), nullptr);
        const hid_t xfer_plist_id = this->mkdxpl();
        char* truedest = reinterpret_cast<char*>(dest) + (todo ? tsize*mbeg->pos() : 0); // offset only if a begin index was given (mbeg may be nullptr)
        const herr_t err = H5Dread(mId, mType, mem_space_id, mFilespace, xfer_plist_id, reinterpret_cast<void*>(truedest));
        CXZ_ASSERT(err >= 0, "error while reading dataset '" << mName
                   << "', errorcode :" << err);
        H5Pclose(xfer_plist_id);
        H5Sclose(mem_space_id);
    }

    hid_t Dataset::mkdxpl() const
    {
        const hid_t xfer_plist_id = H5Pcreate(H5P_DATASET_XFER);
        return xfer_plist_id;
    }

    const RangePtr& Dataset::dataRange() const
    {
        return mFileRange;
@@ -56,7 +56,12 @@ namespace CNORXZ
                   "got index of range of inconsistent size, expected "
                   << mFileRange->size() << ", got " << outrange->size());
        mpi::RArray<T> out(outrange);
        readbase(out.data(), outrange, nullptr);
        auto mbeg = std::make_shared<mpi::RIndex<YIndex,YIndex>>(outrange);
        auto mend = std::make_shared<mpi::RIndex<YIndex,YIndex>>(outrange);
        *mend = mend->lmax().val()-1;
        auto fbeg = std::make_shared<YIndex>(mFileRange);
        //readbase(out.data(), outrange, nullptr);
        rreadbase(out.data(), sizeof(T), mbeg, mend, fbeg);
        return out;
    }
  }
@@ -42,7 +42,14 @@ namespace CNORXZ
        virtual RDataset& writebase(const RangePtr& writeRange, Sptr<YIndex> pos,
                                    const void* data) override;
        virtual void readbase(void* dest, RangePtr readrange, Sptr<YIndex> beg) const override;
        //virtual void readbase(void* dest, Sptr<YIndex> mbeg, Sptr<YIndex> mend,
        //                      Sptr<YIndex> fbeg) const override;

        virtual hid_t mkdxpl() const override;

        void rreadbase(void* dest, SizeT size, Sptr<mpi::RIndex<YIndex,YIndex>> mbeg,
                       Sptr<mpi::RIndex<YIndex,YIndex>> mend, Sptr<YIndex> fbeg) const;

        /** Initialize the dataset.
            @param data Array containing the dataset.
         */
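The mkdxpl() override declared above is the only HDF5-level difference between the serial and the MPI read/write paths: the base class hands H5Dread/H5Dwrite a default dataset-transfer property list, while RDataset switches it to collective MPI-IO. A condensed sketch of that hook, with the class bodies reduced to the property-list logic shown elsewhere in this diff; the class names are illustrative, not a literal copy of the CNORXZ sources.

    // Hedged sketch of the virtual mkdxpl() hook used by the shared readbase()/writebase() bodies.
    #include <hdf5.h>

    struct DatasetSketch
    {
        virtual ~DatasetSketch() = default;
        virtual hid_t mkdxpl() const
        {
            // Serial path: default transfer properties.
            return H5Pcreate(H5P_DATASET_XFER);
        }
    };

    struct RDatasetSketch : DatasetSketch
    {
        hid_t mkdxpl() const override
        {
            // MPI path: same property list, switched to collective transfers
            // (requires an MPI-enabled HDF5 build).
            const hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
            H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
            return dxpl;
        }
    };

Because the hyperslab selection and H5Dread/H5Dwrite calls live once in the base class and only ask this->mkdxpl() for the transfer properties, the serial and parallel datasets share the same read/write bodies.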
@@ -88,77 +88,31 @@ namespace CNORXZ
        MPI_Barrier(MPI_COMM_WORLD);
        return *this;
    }

    void RDataset::readbase(void* dest, Sptr<YIndex> mbeg, Sptr<YIndex> mend,
                            Sptr<YIndex> fbeg) const

    void RDataset::rreadbase(void* dest, SizeT tsize, Sptr<mpi::RIndex<YIndex,YIndex>> mbeg,
                             Sptr<mpi::RIndex<YIndex,YIndex>> mend, Sptr<YIndex> fbeg) const
    {
        const SizeT D = mFileRange->dim();
        const bool todo = mbeg != nullptr;
        Vector<hsize_t> offset(D);
        Vector<hsize_t> dims(D);
        if(todo){
            RangePtr dr = mbeg->range();
            //const bool parallel = true;
            CXZ_ASSERT(mend != nullptr, "no end edge given");
            CXZ_ASSERT(beg->range()->dim() == D, "dimension of file index ("
                       << dr->dim() << ") different from dimension of file range ("
                       << mFileRange->dim() << ")");
            CXZ_ASSERT(mbeg->range()->dim() == D, "dimension of data index ("
                       << dr->dim() << ") different from dimension of file range ("
                       << mFileRange->dim() << ")");
            CXZ_ASSERT(mend->range()->dim() == D, "dimension of data index ("
                       << dr->dim() << ") different from dimension of file range ("
                       << mFileRange->dim() << ")");

            for(SizeT i = 0; i != D; ++i){
                const SizeT bi = mbeg->pack().get(i)->lex();
                const SizeT ei = mend->pack().get(i)->lex();
                const SizeT fgebi = fbeg->pack().get(i)->lex();
                dims[i] = ei - bi + 1; // inclusive!
                offset[i] = fbegi;
            }
        }
        else {
            for(SizeT i = 0; i != D; ++i){
                dims[i] = 0;
                offset[i] = 0;
            }
        }
        H5Sselect_hyperslab(mFilespace, H5S_SELECT_SET, offset.data(), NULL, dims.data(), NULL);
        const hid_t mem_space_id = H5Screate_simple(static_cast<hsize_t>(dims.size()),
                                                    dims.data(), nullptr);
        const hid_t xfer_plist_id = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(xfer_plist_id, H5FD_MPIO_COLLECTIVE);
        const herr_t err = H5Dread(mId, mType, mem_space_id, mFilespace, xfer_plist_id, dest);
        CXZ_ASSERT(err >= 0, "error while reading dataset '" << mName
                   << "', errorcode :" << err);
        H5Pclose(xfer_plist_id);
        H5Sclose(mem_space_id);
        MPI_Barrier(MPI_COMM_WORLD);
    }

    void RDataset::readbase(void* dest, Sptr<mpi::RIndex<YIndex,YIndex>> mbeg,
                            Sptr<mpi::RIndex<YIndex,YIndex>> mend, Sptr<YIndex> fbeg) const
    {
        const SizeT D = mFileRange->dim();
        CXZ_ASSERT(beg->range()->dim() == D, "dimension of file index ("
                   << dr->dim() << ") different from dimension of file range ("
                   << mFileRange->dim() << ")");
        CXZ_ASSERT(mbeg->range()->dim() == D, "dimension of data index ("
                   << dr->dim() << ") different from dimension of file range ("
                   << mFileRange->dim() << ")");
        CXZ_ASSERT(mend->range()->dim() == D, "dimension of data index ("
                   << dr->dim() << ") different from dimension of file range ("
                   << mFileRange->dim() << ")");
        CXZ_ASSERT(fbeg->range()->dim() == D, "dimension of file index ("
                   << fbeg->range()->dim() << ") different from dimension of file range ("
                   << D << ")");
        CXZ_ASSERT(mbeg->local()->range()->dim() == D, "dimension of data index ("
                   << mbeg->local()->range()->dim() << ") different from dimension of file range ("
                   << D << ")");
        CXZ_ASSERT(mend->local()->range()->dim() == D, "dimension of data index ("
                   << mend->local()->range()->dim() << ") different from dimension of file range ("
                   << D << ")");

        Sptr<YIndex> mbegl = std::make_shared<YIndex>(*mbeg->local());
        Sptr<YIndex> mendl = std::make_shared<YIndex>(*mend->local());
        Sptr<YIndex> fbegl = std::make_shared<YIndex>(*fbeg);

        Sptr<YIndex> mbegl = std::make_shared<YIndex>(mbeg->local());
        Sptr<YIndex> mendl = std::make_shared<YIndex>(mend->local());
        Sptr<YIndex> fbegl = std::make_shared<YIndex>(fbeg);

        mbeg->localize();
        mend->localize();
        auto mbeg2 = std::make_shared<mpi::RIndex<YIndex,YIndex>>(mbeg->range());
        mbeg2->synchronize();
        mbeg2->localize();
        //mbeg->localize();
        //mend->localize();
        //auto mbeg2 = std::make_shared<mpi::RIndex<YIndex,YIndex>>(mbeg->range());
        //mbeg2->synchronize();
        auto rmy = mbeg2->rankI();
        auto rb = mbeg->rankI();
        auto re = mend->rankI();
@@ -166,14 +120,14 @@ namespace CNORXZ
        auto le = mend->local();

        bool skip = false;
        for(SizeT i = 0; i != offset.size(); ++i){
        for(SizeT i = 0; i != D; ++i){
            const SizeT rmyi = rmy->pack().get(i)->lex();
            const SizeT rbi = rb->pack().get(i)->lex();
            const SizeT rei = re->pack().get(i)->lex();
            const SizeT lbi = lb->pack().get(i)->lex();
            const SizeT lei = le->pack().get(i)->lex();
            const SizeT Li = lb->pack().get(i)->range()->size();
            const SizeT fgebi = fbeg->pack().get(i)->lex();
            const SizeT fbegi = fbeg->pack().get(i)->lex();

            if(rei < rmyi or rbi > rmyi){
                skip = true;
@@ -181,7 +135,7 @@ namespace CNORXZ
            const SizeT lbegi = (rbi < rmyi) ? 0 : ( (rbi > rmyi) ? Li-1 : lbi );
            const SizeT lendi = (rei < rmyi) ? 0 : ( (rei > rmyi) ? Li-1 : lei );

            const SizeT offxi = Li - fbegi % Li + ( rmyi - rbi ) * Li;
            const SizeT offxi = Li - fbegi % Li + ( rmyi - rbi - 1 ) * Li;

            *mbegl->pack().get(i) = lbegi;
            *mendl->pack().get(i) = lendi;
@@ -193,10 +147,10 @@ namespace CNORXZ

        if(skip){
            // synchronous MPI read -> have to call this function on all ranks
            readbase(dest, nullptr, nullptr, fbegl);
            Dataset::readbase(dest, tsize, nullptr, nullptr, fbegl);
        }
        else {
            readbase(dest, mbegl, mendl, fbegl);
            Dataset::readbase(dest, tsize, mbegl, mendl, fbegl);
        }
    }
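The "have to call this function on all ranks" branch above reflects the collective-read contract: with H5FD_MPIO_COLLECTIVE every rank of the communicator must enter H5Dread(), so ranks that hold no part of the requested block still call Dataset::readbase(), just with an empty selection (the diff encodes this as nullptr begin/end indices and zero-sized hyperslab dimensions). A hedged sketch of the same contract in the plain HDF5 C API, using H5Sselect_none() as the empty selection; all names are illustrative.

    // Hedged sketch: collective read in which non-participating ranks contribute an
    // empty selection instead of skipping the (collective) H5Dread() call.
    #include <hdf5.h>

    void collective_read(hid_t dset, hid_t mtype, hid_t fspace, int ndims,
                         const hsize_t* offset, const hsize_t* dims,
                         bool participates, void* dest)
    {
        const hid_t mspace = H5Screate_simple(ndims, dims, NULL);
        if(participates){
            H5Sselect_hyperslab(fspace, H5S_SELECT_SET, offset, NULL, dims, NULL);
        }
        else {
            H5Sselect_none(fspace);   // this rank reads nothing ...
            H5Sselect_none(mspace);   // ... but still takes part in the collective call
        }
        const hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
        H5Dread(dset, mtype, mspace, fspace, dxpl, dest); // called on every rank
        H5Pclose(dxpl);
        H5Sclose(mspace);
    }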
@@ -240,9 +194,7 @@ namespace CNORXZ
        H5Sselect_hyperslab(mFilespace, H5S_SELECT_SET, offset.data(), NULL, dims.data(), NULL);
        const hid_t mem_space_id = H5Screate_simple(static_cast<hsize_t>(dims.size()),
                                                    dims.data(), nullptr);
        const hid_t xfer_plist_id = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(xfer_plist_id, H5FD_MPIO_COLLECTIVE);
        //MArray<T> out(readRange);
        const hid_t xfer_plist_id = this->mkdxpl();
        const herr_t err = H5Dread(mId, mType, mem_space_id, mFilespace, xfer_plist_id, dest);
        CXZ_ASSERT(err >= 0, "error while reading dataset '" << mName
                   << "', errorcode :" << err);
@@ -251,6 +203,13 @@ namespace CNORXZ
        MPI_Barrier(MPI_COMM_WORLD);
    }

    hid_t RDataset::mkdxpl() const
    {
        const hid_t xfer_plist_id = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(xfer_plist_id, H5FD_MPIO_COLLECTIVE);
        return xfer_plist_id;
    }

    bool RDataset::checkHaveParallel() const
    {
        const ContentBase* p = parent();