artdaq_demo_hdf5  v1_04_01
FragmentNtuple_dataset.cc
#include "tracemf.h"
#define TRACE_NAME "FragmentNtuple"

#include "artdaq-demo-hdf5/HDF5/hep-hpc/FragmentNtuple.hh"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-braces"
#include "hep_hpc/hdf5/make_column.hpp"
#include "hep_hpc/hdf5/make_ntuple.hpp"
#pragma GCC diagnostic pop

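// When DO_COMPRESSION is nonzero, each column is created with a
// dataset-creation property list that enables gzip (deflate) compression at
// level 7 via H5Pset_deflate; otherwise the property lists are empty and the
// datasets are written uncompressed.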
#define DO_COMPRESSION 0
#if DO_COMPRESSION
#define SCALAR_PROPERTIES \
	{ \
		hep_hpc::hdf5::PropertyList{H5P_DATASET_CREATE}(&H5Pset_deflate, 7u) \
	}
#define ARRAY_PROPERTIES \
	{ \
		hep_hpc::hdf5::PropertyList{H5P_DATASET_CREATE}(&H5Pset_deflate, 7u) \
	}
#else
#define SCALAR_PROPERTIES \
	{}
#define ARRAY_PROPERTIES \
	{}
#endif

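// Both constructors create two ntuples in the output file: "Fragments", with
// one row per nWordsPerRow_-word slice of each Fragment (header words
// included), and "EventHeaders", with one row per RawEventHeader. FHiCL
// parameters (defaults in parentheses): "mode" ("write"), "nWordsPerRow"
// (10240), and, for the single-argument constructor, "fileName"
// ("fragments.hdf5").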
artdaq::hdf5::FragmentNtuple::FragmentNtuple(fhicl::ParameterSet const& ps, hep_hpc::hdf5::File const& file)

    : FragmentDataset(ps, ps.get<std::string>("mode", "write"))
    , nWordsPerRow_(ps.get<size_t>("nWordsPerRow", 10240))
    , fragments_(hep_hpc::hdf5::make_ntuple({file, "Fragments"},
                                            hep_hpc::hdf5::make_scalar_column<uint64_t>("sequenceID", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint16_t>("fragmentID", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint64_t>("timestamp", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint8_t>("type", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint64_t>("size", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint64_t>("index", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_column<artdaq::RawDataType, 1>("payload", nWordsPerRow_, ARRAY_PROPERTIES)))
    , eventHeaders_(hep_hpc::hdf5::make_ntuple({fragments_.file(), "EventHeaders"},
                                               hep_hpc::hdf5::make_scalar_column<uint32_t>("run_id", SCALAR_PROPERTIES),
                                               hep_hpc::hdf5::make_scalar_column<uint32_t>("subrun_id", SCALAR_PROPERTIES),
                                               hep_hpc::hdf5::make_scalar_column<uint32_t>("event_id", SCALAR_PROPERTIES),
                                               hep_hpc::hdf5::make_scalar_column<uint64_t>("sequenceID", SCALAR_PROPERTIES),
                                               hep_hpc::hdf5::make_scalar_column<uint8_t>("is_complete", SCALAR_PROPERTIES)))
{
	TLOG(TLVL_DEBUG) << "FragmentNtuple Constructor (file) START";
	if (mode_ == FragmentDatasetMode::Read)
	{
		TLOG(TLVL_ERROR) << "FragmentNtuple configured in read mode but is not capable of reading!";
	}
	TLOG(TLVL_DEBUG) << "FragmentNtuple Constructor (file) END";
}

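// This overload opens its own HDF5 output file, named by the "fileName"
// parameter, rather than writing into a caller-provided hep_hpc::hdf5::File.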
artdaq::hdf5::FragmentNtuple::FragmentNtuple(fhicl::ParameterSet const& ps)

    : FragmentDataset(ps, ps.get<std::string>("mode", "write"))
    , nWordsPerRow_(ps.get<size_t>("nWordsPerRow", 10240))
    , fragments_(hep_hpc::hdf5::make_ntuple({ps.get<std::string>("fileName", "fragments.hdf5"), "Fragments"},
                                            hep_hpc::hdf5::make_scalar_column<uint64_t>("sequenceID", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint16_t>("fragmentID", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint64_t>("timestamp", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint8_t>("type", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint64_t>("size", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_scalar_column<uint64_t>("index", SCALAR_PROPERTIES),
                                            hep_hpc::hdf5::make_column<artdaq::RawDataType, 1>("payload", nWordsPerRow_, ARRAY_PROPERTIES)))
    , eventHeaders_(hep_hpc::hdf5::make_ntuple({fragments_.file(), "EventHeaders"},
                                               hep_hpc::hdf5::make_scalar_column<uint32_t>("run_id", SCALAR_PROPERTIES),
                                               hep_hpc::hdf5::make_scalar_column<uint32_t>("subrun_id", SCALAR_PROPERTIES),
                                               hep_hpc::hdf5::make_scalar_column<uint32_t>("event_id", SCALAR_PROPERTIES),
                                               hep_hpc::hdf5::make_scalar_column<uint64_t>("sequenceID", SCALAR_PROPERTIES),
                                               hep_hpc::hdf5::make_scalar_column<uint8_t>("is_complete", SCALAR_PROPERTIES)))
{
	TLOG(TLVL_DEBUG) << "FragmentNtuple Constructor START";
	if (mode_ == FragmentDatasetMode::Read)
	{
		TLOG(TLVL_ERROR) << "FragmentNtuple configured in read mode but is not capable of reading!";
	}
	TLOG(TLVL_DEBUG) << "FragmentNtuple Constructor END";
}

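// Flush any rows still buffered in the Fragments ntuple so they reach the
// file before it is closed.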
artdaq::hdf5::FragmentNtuple::~FragmentNtuple()
{
	TLOG(TLVL_DEBUG) << "FragmentNtuple Destructor START";
	fragments_.flush();
	TLOG(TLVL_DEBUG) << "FragmentNtuple Destructor END";
}

void artdaq::hdf5::FragmentNtuple::insertOne(artdaq::Fragment const& frag)
{
	TLOG(TLVL_TRACE) << "FragmentNtuple::insertOne BEGIN";

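	// Write the Fragment in nWordsPerRow_-word slices, one ntuple row per
	// slice, starting at the Fragment header (headerBegin()). The "index"
	// column records each slice's starting word offset so the Fragment can be
	// reassembled on read. A final partial slice is copied into a zero-padded
	// buffer so that every row holds exactly nWordsPerRow_ words.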
	for (size_t ii = 0; ii < frag.size(); ii += nWordsPerRow_)
	{
		if (ii + nWordsPerRow_ <= frag.size())
		{
			fragments_.insert(frag.sequenceID(), frag.fragmentID(), frag.timestamp(), frag.type(),
			                  frag.size(), ii, &frag.headerBegin()[ii]);
		}
		else
		{
			std::vector<artdaq::RawDataType> words(nWordsPerRow_, 0);
			std::copy(frag.headerBegin() + ii, frag.dataEnd(), words.begin());
			fragments_.insert(frag.sequenceID(), frag.fragmentID(), frag.timestamp(), frag.type(),
			                  frag.size(), ii, &words[0]);
		}
	}
	TLOG(TLVL_TRACE) << "FragmentNtuple::insertOne END";
}

void artdaq::hdf5::FragmentNtuple::insertHeader(artdaq::detail::RawEventHeader const& hdr)
{
	TLOG(TLVL_TRACE) << "FragmentNtuple::insertHeader: Writing header to eventHeaders_ group";
	eventHeaders_.insert(hdr.run_id, hdr.subrun_id, hdr.event_id, hdr.sequence_id, hdr.is_complete);
}

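// Registers this class as an artdaq dataset plugin, making it loadable by
// name at run time.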
DEFINE_ARTDAQ_DATASET_PLUGIN(artdaq::hdf5::FragmentNtuple)