artdaq  v3_03_00
CommandableFragmentGenerator.cc
1 #define TRACE_NAME (app_name + "_CommandableFragmentGenerator").c_str() // include these 2 first -
2 #include "artdaq/DAQdata/Globals.hh"
3 
4 #include "artdaq/Application/CommandableFragmentGenerator.hh"
5 
6 #include <boost/exception/all.hpp>
7 #include <boost/throw_exception.hpp>
8 
9 #include <limits>
10 #include <iterator>
11 
12 #include "canvas/Utilities/Exception.h"
13 #include "cetlib_except/exception.h"
14 #include "fhiclcpp/ParameterSet.h"
15 
16 #include "artdaq-core/Utilities/SimpleLookupPolicy.hh"
17 #include "artdaq-core/Data/Fragment.hh"
18 #include "artdaq-core/Data/ContainerFragmentLoader.hh"
19 #include "artdaq-core/Utilities/ExceptionHandler.hh"
20 #include "artdaq-core/Utilities/TimeUtils.hh"
21 
22 #include <fstream>
23 #include <iomanip>
24 #include <iterator>
25 #include <iostream>
26 #include <iomanip>
27 #include <algorithm>
28 #include <sys/poll.h>
30 
31 #define TLVL_GETNEXT 10
32 #define TLVL_GETNEXT_VERBOSE 20
33 #define TLVL_CHECKSTOP 11
34 #define TLVL_EVCOUNTERINC 12
35 #define TLVL_GETDATALOOP 13
36 #define TLVL_GETDATALOOP_DATABUFFWAIT 21
37 #define TLVL_GETDATALOOP_VERBOSE 20
38 #define TLVL_WAITFORBUFFERREADY 15
39 #define TLVL_GETBUFFERSTATS 16
40 #define TLVL_CHECKDATABUFFER 17
41 #define TLVL_GETMONITORINGDATA 18
42 #define TLVL_APPLYREQUESTS 9
43 #define TLVL_SENDEMPTYFRAGMENTS 19
44 #define TLVL_CHECKWINDOWS 14
45 
46 artdaq::CommandableFragmentGenerator::CommandableFragmentGenerator()
47  : mutex_()
48  , requestReceiver_(nullptr)
49  , windowOffset_(0)
50  , windowWidth_(0)
51  , staleTimeout_(Fragment::InvalidTimestamp)
52  , expectedType_(Fragment::EmptyFragmentType)
53  , maxFragmentCount_(std::numeric_limits<size_t>::max())
54  , uniqueWindows_(true)
55  , windows_sent_ooo_()
56  , missing_request_window_timeout_us_(1000000)
57  , window_close_timeout_us_(2000000)
58  , useDataThread_(false)
59  , circularDataBufferMode_(false)
60  , sleep_on_no_data_us_(0)
61  , data_thread_running_(false)
62  , dataBufferDepthFragments_(0)
63  , dataBufferDepthBytes_(0)
64  , maxDataBufferDepthFragments_(1000)
65  , maxDataBufferDepthBytes_(1000)
66  , useMonitoringThread_(false)
67  , monitoringInterval_(0)
68  , lastMonitoringCall_()
69  , isHardwareOK_(true)
70  , dataBuffer_()
71  , newDataBuffer_()
72  , run_number_(-1)
73  , subrun_number_(-1)
74  , timeout_(std::numeric_limits<uint64_t>::max())
75  , timestamp_(std::numeric_limits<uint64_t>::max())
76  , should_stop_(false)
77  , exception_(false)
78  , force_stop_(false)
79  , latest_exception_report_("none")
80  , ev_counter_(1)
81  , board_id_(-1)
82  , instance_name_for_metrics_("FragmentGenerator")
83  , sleep_on_stop_us_(0)
84 {}
85 
86 artdaq::CommandableFragmentGenerator::CommandableFragmentGenerator(const fhicl::ParameterSet& ps)
87  : mutex_()
88  , requestReceiver_(nullptr)
89  , windowOffset_(ps.get<Fragment::timestamp_t>("request_window_offset", 0))
90  , windowWidth_(ps.get<Fragment::timestamp_t>("request_window_width", 0))
91  , staleTimeout_(ps.get<Fragment::timestamp_t>("stale_request_timeout", 0xFFFFFFFF))
92  , expectedType_(ps.get<Fragment::type_t>("expected_fragment_type", Fragment::type_t(Fragment::EmptyFragmentType)))
93  , uniqueWindows_(ps.get<bool>("request_windows_are_unique", true))
94  , windows_sent_ooo_()
95  , missing_request_window_timeout_us_(ps.get<size_t>("missing_request_window_timeout_us", 5000000))
96  , window_close_timeout_us_(ps.get<size_t>("window_close_timeout_us", 2000000))
97  , useDataThread_(ps.get<bool>("separate_data_thread", false))
98  , circularDataBufferMode_(ps.get<bool>("circular_buffer_mode", false))
99  , sleep_on_no_data_us_(ps.get<size_t>("sleep_on_no_data_us", 0))
100  , data_thread_running_(false)
101  , dataBufferDepthFragments_(0)
102  , dataBufferDepthBytes_(0)
103  , maxDataBufferDepthFragments_(ps.get<int>("data_buffer_depth_fragments", 1000))
104  , maxDataBufferDepthBytes_(ps.get<size_t>("data_buffer_depth_mb", 1000) * 1024 * 1024)
105  , useMonitoringThread_(ps.get<bool>("separate_monitoring_thread", false))
106  , monitoringInterval_(ps.get<int64_t>("hardware_poll_interval_us", 0))
107  , lastMonitoringCall_()
108  , isHardwareOK_(true)
109  , dataBuffer_()
110  , newDataBuffer_()
111  , run_number_(-1)
112  , subrun_number_(-1)
113  , timeout_(std::numeric_limits<uint64_t>::max())
114  , timestamp_(std::numeric_limits<uint64_t>::max())
115  , should_stop_(false)
116  , exception_(false)
117  , force_stop_(false)
118  , latest_exception_report_("none")
119  , ev_counter_(1)
120  , board_id_(-1)
121  , sleep_on_stop_us_(0)
122 {
123  board_id_ = ps.get<int>("board_id");
124  instance_name_for_metrics_ = "BoardReader." + boost::lexical_cast<std::string>(board_id_);
125 
126  fragment_ids_ = ps.get<std::vector<artdaq::Fragment::fragment_id_t>>("fragment_ids", std::vector<artdaq::Fragment::fragment_id_t>());
127 
128  TLOG(TLVL_TRACE) << "artdaq::CommandableFragmentGenerator::CommandableFragmentGenerator(ps)";
129  int fragment_id = ps.get<int>("fragment_id", -99);
130 
131  if (fragment_id != -99)
132  {
133  if (fragment_ids_.size() != 0)
134  {
135  latest_exception_report_ = "Error in CommandableFragmentGenerator: can't both define \"fragment_id\" and \"fragment_ids\" in FHiCL document";
136  throw cet::exception(latest_exception_report_);
137  }
138  else
139  {
140  fragment_ids_.emplace_back(fragment_id);
141  }
142  }
143 
144  sleep_on_stop_us_ = ps.get<int>("sleep_on_stop_us", 0);
145 
146  dataBuffer_.emplace_back(FragmentPtr(new Fragment()));
147  (*dataBuffer_.begin())->setSystemType(Fragment::EmptyFragmentType);
148 
149  std::string modeString = ps.get<std::string>("request_mode", "ignored");
150  if (modeString == "single" || modeString == "Single")
151  {
152  mode_ = RequestMode::Single;
153  }
154  else if (modeString.find("buffer") != std::string::npos || modeString.find("Buffer") != std::string::npos)
155  {
156  mode_ = RequestMode::Buffer;
157  }
158  else if (modeString == "window" || modeString == "Window")
159  {
160  mode_ = RequestMode::Window;
161  }
162  else if (modeString.find("ignore") != std::string::npos || modeString.find("Ignore") != std::string::npos)
163  {
164  mode_ = RequestMode::Ignored;
165  }
166  TLOG(TLVL_DEBUG) << "Request mode is " << printMode_();
167 
168  if (mode_ != RequestMode::Ignored)
169  {
170  if (!useDataThread_)
171  {
172  latest_exception_report_ = "Error in CommandableFragmentGenerator: use_data_thread must be true when request_mode is not \"Ignored\"!";
173  throw cet::exception(latest_exception_report_);
174  }
175  requestReceiver_.reset(new RequestReceiver(ps));
176  }
177 }
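// ---------------------------------------------------------------------------------
// Illustrative sketch (not part of CommandableFragmentGenerator.cc): the constructor
// above shows the FHiCL parameters every generator inherits. A concrete generator
// derives from CommandableFragmentGenerator and implements the pure-virtual hooks;
// the code below is a minimal illustration only, assuming the usual hooks are
// getNext_, start, stop and stopNoMutex. The class name ExampleGenerator is hypothetical.
#include "artdaq/Application/CommandableFragmentGenerator.hh"
#include "fhiclcpp/ParameterSet.h"

class ExampleGenerator : public artdaq::CommandableFragmentGenerator
{
public:
    explicit ExampleGenerator(fhicl::ParameterSet const& ps)
        : CommandableFragmentGenerator(ps) {}

private:
    bool getNext_(artdaq::FragmentPtrs& output) override
    {
        if (should_stop()) return false;                                         // honor Stop
        output.emplace_back(new artdaq::Fragment(ev_counter(), fragment_id()));  // assumes a single configured fragment_id
        output.back()->setTimestamp(ev_counter());                               // placeholder timestamp
        ev_counter_inc();                                                        // advance the sequence ID
        return true;
    }
    void start() override {}        // begin taking data for a run
    void stop() override {}         // end taking data (mutex held)
    void stopNoMutex() override {}  // called by StopCmd before the mutex is acquired
};
// ---------------------------------------------------------------------------------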
178 
179 artdaq::CommandableFragmentGenerator::~CommandableFragmentGenerator()
180 {
181  joinThreads();
182  requestReceiver_.reset(nullptr);
183 }
184 
185 void artdaq::CommandableFragmentGenerator::joinThreads()
186 {
187  should_stop_ = true;
188  force_stop_ = true;
189  TLOG(TLVL_DEBUG) << "Joining dataThread";
190  if (dataThread_.joinable()) dataThread_.join();
191  TLOG(TLVL_DEBUG) << "Joining monitoringThread";
192  if (monitoringThread_.joinable()) monitoringThread_.join();
193  TLOG(TLVL_DEBUG) << "joinThreads complete";
194 }
195 
196 bool artdaq::CommandableFragmentGenerator::getNext(artdaq::FragmentPtrs& output)
197 {
198  bool result = true;
199 
200  if (check_stop()) usleep(sleep_on_stop_us_);
201  if (exception() || force_stop_) return false;
202 
203  if (!useMonitoringThread_ && monitoringInterval_ > 0)
204  {
205  TLOG(TLVL_GETNEXT) << "getNext: Checking whether to collect Monitoring Data";
206  auto now = std::chrono::steady_clock::now();
207 
208  if (TimeUtils::GetElapsedTimeMicroseconds(lastMonitoringCall_, now) >= static_cast<size_t>(monitoringInterval_))
209  {
210  TLOG(TLVL_GETNEXT) << "getNext: Collecting Monitoring Data";
211  isHardwareOK_ = checkHWStatus_();
212  TLOG(TLVL_GETNEXT) << "getNext: isHardwareOK_ is now " << std::boolalpha << isHardwareOK_;
213  lastMonitoringCall_ = now;
214  }
215  }
216 
217  try
218  {
219  std::lock_guard<std::mutex> lk(mutex_);
220  if (useDataThread_)
221  {
222  TLOG(TLVL_TRACE) << "getNext: Calling applyRequests";
223  result = applyRequests(output);
224  TLOG(TLVL_TRACE) << "getNext: Done with applyRequests result=" << std::boolalpha << result;
225  for (auto dataIter = output.begin(); dataIter != output.end(); ++dataIter)
226  {
227  TLOG(20) << "getNext: applyRequests() returned fragment with sequenceID = " << (*dataIter)->sequenceID()
228  << ", timestamp = " << (*dataIter)->timestamp() << ", and sizeBytes = " << (*dataIter)->sizeBytes();
229  }
230 
231  if (exception())
232  {
233  TLOG(TLVL_ERROR) << "Exception found in BoardReader with board ID " << board_id() << "; BoardReader will now return error status when queried";
234  throw cet::exception("CommandableFragmentGenerator") << "Exception found in BoardReader with board ID " << board_id() << "; BoardReader will now return error status when queried";
235  }
236  }
237  else
238  {
239  if (!isHardwareOK_)
240  {
241  TLOG(TLVL_ERROR) << "Stopping CFG because the hardware reports bad status!";
242  return false;
243  }
244  TLOG(TLVL_TRACE) << "getNext: Calling getNext_ w/ ev_counter()=" << ev_counter();
245  try
246  {
247  result = getNext_(output);
248  }
249  catch (...)
250  {
251  throw;
252  }
253  TLOG(TLVL_TRACE) << "getNext: Done with getNext_ - ev_counter() now " << ev_counter();
254  for (auto dataIter = output.begin(); dataIter != output.end(); ++dataIter)
255  {
256  TLOG(TLVL_GETNEXT_VERBOSE) << "getNext: getNext_() returned fragment with sequenceID = " << (*dataIter)->sequenceID()
257  << ", timestamp = " << (*dataIter)->timestamp() << ", and sizeBytes = " << (*dataIter)->sizeBytes();
258  }
259  }
260  }
261  catch (const cet::exception& e)
262  {
263  latest_exception_report_ = "cet::exception caught in getNext(): ";
264  latest_exception_report_.append(e.what());
265  TLOG(TLVL_ERROR) << "getNext: cet::exception caught: " << e;
266  set_exception(true);
267  return false;
268  }
269  catch (const boost::exception& e)
270  {
271  latest_exception_report_ = "boost::exception caught in getNext(): ";
272  latest_exception_report_.append(boost::diagnostic_information(e));
273  TLOG(TLVL_ERROR) << "getNext: boost::exception caught: " << boost::diagnostic_information(e);
274  set_exception(true);
275  return false;
276  }
277  catch (const std::exception& e)
278  {
279  latest_exception_report_ = "std::exception caught in getNext(): ";
280  latest_exception_report_.append(e.what());
281  TLOG(TLVL_ERROR) << "getNext: std::exception caught: " << e.what();
282  set_exception(true);
283  return false;
284  }
285  catch (...)
286  {
287  latest_exception_report_ = "Unknown exception caught in getNext().";
288  TLOG(TLVL_ERROR) << "getNext: unknown exception caught";
289  set_exception(true);
290  return false;
291  }
292 
293  if (!result)
294  {
295  TLOG(TLVL_DEBUG) << "getNext: Either getNext_ or applyRequests returned false, stopping";
296  }
297 
298  if (metricMan && !output.empty())
299  {
300  auto timestamp = output.front()->timestamp();
301 
302  if (output.size() > 1)
303  { // Only bother sorting if >1 entry
304  for (auto& outputfrag : output)
305  {
306  if (outputfrag->timestamp() > timestamp)
307  {
308  timestamp = outputfrag->timestamp();
309  }
310  }
311  }
312 
313  metricMan->sendMetric("Last Timestamp", timestamp, "Ticks", 1,
314  MetricMode::LastPoint, app_name);
315  }
316 
317  return result;
318 }
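// ---------------------------------------------------------------------------------
// Illustrative sketch (not part of this file): getNext() is the method the BoardReader
// application core drives repeatedly. A much-simplified caller, shown only to
// illustrate that a false return means "stop"; drainGenerator is a hypothetical helper
// and fragment forwarding is elided.
#include "artdaq/Application/CommandableFragmentGenerator.hh"

void drainGenerator(artdaq::CommandableFragmentGenerator& gen)
{
    artdaq::FragmentPtrs frags;
    while (gen.getNext(frags))  // false on stop, hardware error, or exception
    {
        // ... forward frags toward the EventBuilders here ...
        frags.clear();
    }
}
// ---------------------------------------------------------------------------------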
319 
320 bool artdaq::CommandableFragmentGenerator::check_stop()
321 {
322  TLOG(TLVL_CHECKSTOP) << "CFG::check_stop: should_stop=" << should_stop() << ", useDataThread_=" << useDataThread_ << ", exception status =" << int(exception());
323 
324  if (!should_stop()) return false;
325  if (!useDataThread_ || mode_ == RequestMode::Ignored) return true;
326  if (force_stop_) return true;
327 
328  // check_stop returns true if the CFG should stop. We should wait for the RequestReceiver to stop before stopping.
329  TLOG(TLVL_DEBUG) << "should_stop is true, force_stop_ is false, requestReceiver_->isRunning() is " << std::boolalpha << requestReceiver_->isRunning();
330  return !requestReceiver_->isRunning();
331 }
332 
333 int artdaq::CommandableFragmentGenerator::fragment_id() const
334 {
335  if (fragment_ids_.size() != 1)
336  {
337  throw cet::exception("Error in CommandableFragmentGenerator: can't call fragment_id() unless member fragment_ids_ vector is length 1");
338  }
339  else
340  {
341  return fragment_ids_[0];
342  }
343 }
344 
345 size_t artdaq::CommandableFragmentGenerator::ev_counter_inc(size_t step, bool force)
346 {
347  if (force || mode_ == RequestMode::Ignored)
348  {
349  TLOG(TLVL_EVCOUNTERINC) << "ev_counter_inc: Incrementing ev_counter from " << ev_counter() << " by " << step;
350  return ev_counter_.fetch_add(step);
351  }
352  return ev_counter_.load();
353 } // returns the prev value
354 
355 void artdaq::CommandableFragmentGenerator::StartCmd(int run, uint64_t timeout, uint64_t timestamp)
356 {
357  TLOG(TLVL_TRACE) << "Start Command received.";
358  if (run < 0) throw cet::exception("CommandableFragmentGenerator") << "negative run number";
359 
360  timeout_ = timeout;
361  timestamp_ = timestamp;
362  ev_counter_.store(1);
363  windows_sent_ooo_.clear();
364  dataBuffer_.clear();
365  should_stop_.store(false);
366  force_stop_.store(false);
367  exception_.store(false);
368  run_number_ = run;
369  subrun_number_ = 1;
370  latest_exception_report_ = "none";
371 
372  start();
373 
374  std::unique_lock<std::mutex> lk(mutex_);
375  if (useDataThread_) startDataThread();
376  if (useMonitoringThread_) startMonitoringThread();
377  if (mode_ != RequestMode::Ignored && !requestReceiver_->isRunning()) requestReceiver_->startRequestReceiverThread();
378  TLOG(TLVL_TRACE) << "Start Command complete.";
379 }
380 
381 void artdaq::CommandableFragmentGenerator::StopCmd(uint64_t timeout, uint64_t timestamp)
382 {
383  TLOG(TLVL_TRACE) << "Stop Command received.";
384 
385  timeout_ = timeout;
386  timestamp_ = timestamp;
387  if (requestReceiver_ && requestReceiver_->isRunning()) {
388  TLOG(TLVL_DEBUG) << "Stopping Request receiver thread BEGIN";
389  requestReceiver_->stopRequestReceiverThread();
390  TLOG(TLVL_DEBUG) << "Stopping Request receiver thread END";
391  }
392 
393  stopNoMutex();
394  should_stop_.store(true);
395  std::unique_lock<std::mutex> lk(mutex_);
396  stop();
397 
398  joinThreads();
399  TLOG(TLVL_TRACE) << "Stop Command complete.";
400 }
401 
402 void artdaq::CommandableFragmentGenerator::PauseCmd(uint64_t timeout, uint64_t timestamp)
403 {
404  TLOG(TLVL_TRACE) << "Pause Command received.";
405  timeout_ = timeout;
406  timestamp_ = timestamp;
407  //if (requestReceiver_->isRunning()) requestReceiver_->stopRequestReceiverThread();
408 
409  pauseNoMutex();
410  should_stop_.store(true);
411  std::unique_lock<std::mutex> lk(mutex_);
412 
413  pause();
414 }
415 
416 void artdaq::CommandableFragmentGenerator::ResumeCmd(uint64_t timeout, uint64_t timestamp)
417 {
418  TLOG(TLVL_TRACE) << "Resume Command received.";
419  timeout_ = timeout;
420  timestamp_ = timestamp;
421 
422  subrun_number_ += 1;
423  should_stop_ = false;
424  {
425  std::unique_lock<std::mutex> lk(dataBufferMutex_);
426  dataBuffer_.clear();
427  }
428  // no lock required: thread not started yet
429  resume();
430 
431  std::unique_lock<std::mutex> lk(mutex_);
432  //if (useDataThread_) startDataThread();
433  //if (useMonitoringThread_) startMonitoringThread();
434  //if (mode_ != RequestMode::Ignored && !requestReceiver_->isRunning()) requestReceiver_->startRequestReceiverThread();
435  TLOG(TLVL_TRACE) << "Resume Command complete.";
436 }
437 
438 std::string artdaq::CommandableFragmentGenerator::ReportCmd(std::string const& which)
439 {
440  TLOG(TLVL_TRACE) << "Report Command received.";
441  std::lock_guard<std::mutex> lk(mutex_);
442 
443  // 14-May-2015, KAB: please see the comments associated with the report()
444  // methods in the CommandableFragmentGenerator.hh file for more information
445  // on the use of those methods in this method.
446 
447  // check if the child class has something meaningful for this request
448  std::string childReport = reportSpecific(which);
449  if (childReport.length() > 0) { return childReport; }
450 
451  // handle the requests that we can take care of at this level
452  if (which == "latest_exception")
453  {
454  return latest_exception_report_;
455  }
456 
457  // check if the child class has provided a catch-all report function
458  childReport = report();
459  if (childReport.length() > 0) { return childReport; }
460 
461  // if we haven't been able to come up with any report so far, say so
462  std::string tmpString = "The \"" + which + "\" command is not ";
463  tmpString.append("currently supported by the ");
464  tmpString.append(metricsReportingInstanceName());
465  tmpString.append(" fragment generator.");
466  TLOG(TLVL_TRACE) << "Report Command complete.";
467  return tmpString;
468 }
469 
470 // Default implementations of state functions
471 void artdaq::CommandableFragmentGenerator::pauseNoMutex()
472 {
473 #pragma message "Using default implementation of CommandableFragmentGenerator::pauseNoMutex()"
474 }
475 
476 void artdaq::CommandableFragmentGenerator::pause()
477 {
478 #pragma message "Using default implementation of CommandableFragmentGenerator::pause()"
479 }
480 
481 void artdaq::CommandableFragmentGenerator::resume()
482 {
483 #pragma message "Using default implementation of CommandableFragmentGenerator::resume()"
484 }
485 
486 std::string artdaq::CommandableFragmentGenerator::report()
487 {
488 #pragma message "Using default implementation of CommandableFragmentGenerator::report()"
489  return "";
490 }
491 
492 std::string artdaq::CommandableFragmentGenerator::reportSpecific(std::string const&)
493 {
494 #pragma message "Using default implementation of CommandableFragmentGenerator::reportSpecific(std::string)"
495  return "";
496 }
497 
498 bool artdaq::CommandableFragmentGenerator::checkHWStatus_()
499 {
500 #pragma message "Using default implementation of CommandableFragmentGenerator::checkHWStatus_()"
501  return true;
502 }
503 
504 bool artdaq::CommandableFragmentGenerator::metaCommand(std::string const&, std::string const&)
505 {
506 #pragma message "Using default implementation of CommandableFragmentGenerator::metaCommand(std::string, std::string)"
507  return true;
508 }
509 
510 void artdaq::CommandableFragmentGenerator::startDataThread()
511 {
512  if (dataThread_.joinable()) dataThread_.join();
513  TLOG(TLVL_INFO) << "Starting Data Receiver Thread";
514  try {
515  dataThread_ = boost::thread(&CommandableFragmentGenerator::getDataLoop, this);
516  }
517  catch (const boost::exception& e)
518  {
519  TLOG(TLVL_ERROR) << "Caught boost::exception starting Data Receiver thread: " << boost::diagnostic_information(e) << ", errno=" << errno;
520  std::cerr << "Caught boost::exception starting Data Receiver thread: " << boost::diagnostic_information(e) << ", errno=" << errno << std::endl;
521  exit(5);
522  }
523 }
524 
525 void artdaq::CommandableFragmentGenerator::startMonitoringThread()
526 {
527  if (monitoringThread_.joinable()) monitoringThread_.join();
528  TLOG(TLVL_INFO) << "Starting Hardware Monitoring Thread";
529  try {
530  monitoringThread_ = boost::thread(&CommandableFragmentGenerator::getMonitoringDataLoop, this);
531  }
532  catch (const boost::exception& e)
533  {
534  TLOG(TLVL_ERROR) << "Caught boost::exception starting Hardware Monitoring thread: " << boost::diagnostic_information(e) << ", errno=" << errno;
535  std::cerr << "Caught boost::exception starting Hardware Monitoring thread: " << boost::diagnostic_information(e) << ", errno=" << errno << std::endl;
536  exit(5);
537  }
538 }
539 
540 std::string artdaq::CommandableFragmentGenerator::printMode_()
541 {
542  switch (mode_)
543  {
544  case RequestMode::Single:
545  return "Single";
546  case RequestMode::Buffer:
547  return "Buffer";
548  case RequestMode::Window:
549  return "Window";
550  case RequestMode::Ignored:
551  return "Ignored";
552  }
553 
554  return "ERROR";
555 }
556 
557 
558 //
559 // The "useDataThread_" thread
560 //
561 void artdaq::CommandableFragmentGenerator::getDataLoop()
562 {
563  data_thread_running_ = true;
564  while (!force_stop_)
565  {
566  if (!isHardwareOK_)
567  {
568  TLOG(TLVL_DEBUG) << "getDataLoop: isHardwareOK is " << isHardwareOK_ << ", aborting data thread";
569  data_thread_running_ = false;
570  return;
571  }
572 
573  TLOG(TLVL_GETDATALOOP) << "getDataLoop: calling getNext_";
574 
575  bool data = false;
576  auto startdata = std::chrono::steady_clock::now();
577 
578  try
579  {
580  data = getNext_(newDataBuffer_);
581  }
582  catch (...)
583  {
584  ExceptionHandler(ExceptionHandlerRethrow::no,
585  "Exception thrown by fragment generator in CommandableFragmentGenerator::getDataLoop; setting exception state to \"true\"");
586  set_exception(true);
587 
588  data_thread_running_ = false;
589  return;
590  }
591  for (auto dataIter = newDataBuffer_.begin(); dataIter != newDataBuffer_.end(); ++dataIter)
592  {
593  TLOG(TLVL_GETDATALOOP_VERBOSE) << "getDataLoop: getNext_() returned fragment with timestamp = " << (*dataIter)->timestamp() << ", and sizeBytes = " << (*dataIter)->sizeBytes();
594  }
595 
596  if (metricMan)
597  {
598  metricMan->sendMetric("Avg Data Acquisition Time", TimeUtils::GetElapsedTime(startdata), "s", 3, artdaq::MetricMode::Average);
599  }
600 
601  if (newDataBuffer_.size() == 0 && sleep_on_no_data_us_ > 0)
602  {
603  usleep(sleep_on_no_data_us_);
604  }
605 
606  TLOG(TLVL_GETDATALOOP_DATABUFFWAIT) << "Waiting for data buffer ready";
607  if (!waitForDataBufferReady()) return;
608  TLOG(TLVL_GETDATALOOP_DATABUFFWAIT) << "Done waiting for data buffer ready";
609 
610  TLOG(TLVL_GETDATALOOP) << "getDataLoop: processing data";
611  if (data && !force_stop_)
612  {
613  std::unique_lock<std::mutex> lock(dataBufferMutex_);
614  switch (mode_)
615  {
616  case RequestMode::Single:
617  // While here, if for some strange reason more than one event's worth of data is returned from getNext_...
618  while (newDataBuffer_.size() >= fragment_ids_.size())
619  {
620  dataBuffer_.clear();
621  auto it = newDataBuffer_.begin();
622  std::advance(it, fragment_ids_.size());
623  dataBuffer_.splice(dataBuffer_.end(), newDataBuffer_, newDataBuffer_.begin(), it);
624  }
625  break;
626  case RequestMode::Buffer:
627  case RequestMode::Ignored:
628  case RequestMode::Window:
629  default:
630  //dataBuffer_.reserve(dataBuffer_.size() + newDataBuffer_.size());
631  dataBuffer_.splice(dataBuffer_.end(), newDataBuffer_);
632  break;
633  }
634  getDataBufferStats();
635  }
636 
637  {
638  std::unique_lock<std::mutex> lock(dataBufferMutex_);
639  if (dataBuffer_.size() > 0)
640  {
641  dataCondition_.notify_all();
642  }
643  }
644  if (!data || force_stop_)
645  {
646  TLOG(TLVL_INFO) << "Data flow has stopped. Ending data collection thread";
647  std::unique_lock<std::mutex> lock(dataBufferMutex_);
648  data_thread_running_ = false;
649  if (requestReceiver_) requestReceiver_->ClearRequests();
650  newDataBuffer_.clear();
651  TLOG(TLVL_INFO) << "getDataLoop: Ending thread";
652  return;
653  }
654  }
655 }
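// Illustrative note (not part of this file): in RequestMode::Single the splice above keeps
// only the newest fragment_ids_.size() Fragments. For example, with one configured fragment
// ID and three Fragments returned by getNext_, the first two are discarded and only the last
// remains in dataBuffer_ to answer the next request.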
656 
657 bool artdaq::CommandableFragmentGenerator::waitForDataBufferReady()
658 {
659  auto startwait = std::chrono::steady_clock::now();
660  auto first = true;
661  auto lastwaittime = 0ULL;
662 
663  {
664  std::unique_lock<std::mutex> lock(dataBufferMutex_);
665  getDataBufferStats();
666  }
667 
668  while (dataBufferIsTooLarge())
669  {
670  if (!circularDataBufferMode_)
671  {
672  if (should_stop())
673  {
674  TLOG(TLVL_DEBUG) << "Run ended while waiting for buffer to shrink!";
675  std::unique_lock<std::mutex> lock(dataBufferMutex_);
676  getDataBufferStats();
677  dataCondition_.notify_all();
678  data_thread_running_ = false;
679  return false;
680  }
681  auto waittime = TimeUtils::GetElapsedTimeMilliseconds(startwait);
682 
683  if (first || (waittime != lastwaittime && waittime % 1000 == 0))
684  {
685  TLOG(TLVL_WARNING) << "Bad Omen: Data Buffer has exceeded its size limits. "
686  << "(seq_id=" << ev_counter()
687  << ", frags=" << dataBufferDepthFragments_ << "/" << maxDataBufferDepthFragments_
688  << ", szB=" << dataBufferDepthBytes_ << "/" << maxDataBufferDepthBytes_ << ")";
689  TLOG(TLVL_TRACE) << "Bad Omen: Possible causes include requests not getting through or Ignored-mode BR issues";
690  first = false;
691  }
692  if (waittime % 5 && waittime != lastwaittime)
693  {
694  TLOG(TLVL_WAITFORBUFFERREADY) << "getDataLoop: Data Retrieval paused for " << waittime << " ms waiting for data buffer to drain";
695  }
696  lastwaittime = waittime;
697  usleep(1000);
698  }
699  else
700  {
701  std::unique_lock<std::mutex> lock(dataBufferMutex_);
702  getDataBufferStats(); // Re-check under lock
703  if (dataBufferIsTooLarge())
704  {
705  if (dataBuffer_.begin() == dataBuffer_.end())
706  {
707  TLOG(TLVL_WARNING) << "Data buffer is reported as too large, but doesn't contain any Fragments! Possible corrupt memory!";
708  continue;
709  }
710  if (*dataBuffer_.begin())
711  {
712  TLOG(TLVL_WAITFORBUFFERREADY) << "waitForDataBufferReady: Dropping Fragment with timestamp " << (*dataBuffer_.begin())->timestamp() << " from data buffer (Buffer over-size, circular data buffer mode)";
713  }
714  dataBuffer_.erase(dataBuffer_.begin());
715  getDataBufferStats();
716  }
717 
718  }
719  }
720  return true;
721 }
722 
723 bool artdaq::CommandableFragmentGenerator::dataBufferIsTooLarge()
724 {
725  return (maxDataBufferDepthFragments_ > 0 && dataBufferDepthFragments_ > maxDataBufferDepthFragments_) || (maxDataBufferDepthBytes_ > 0 && dataBufferDepthBytes_ > maxDataBufferDepthBytes_);
726 }
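// Illustrative note (not part of this file): with the constructor defaults above
// (data_buffer_depth_fragments = 1000, data_buffer_depth_mb = 1000), the buffer is
// "too large" once it holds more than 1000 Fragments or more than
// 1000 * 1024 * 1024 = 1,048,576,000 bytes; setting either limit to 0 disables that check.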
727 
728 void artdaq::CommandableFragmentGenerator::getDataBufferStats()
729 {
731  dataBufferDepthFragments_ = dataBuffer_.size();
732  size_t acc = 0;
733  TLOG(TLVL_GETBUFFERSTATS) << "getDataBufferStats: Calculating buffer size";
734  for (auto i = dataBuffer_.begin(); i != dataBuffer_.end(); ++i)
735  {
736  if (i->get() != nullptr)
737  {
738  acc += (*i)->sizeBytes();
739  }
740  }
741  dataBufferDepthBytes_ = acc;
742 
743  if (metricMan)
744  {
745  TLOG(TLVL_GETBUFFERSTATS) << "getDataBufferStats: Sending Metrics";
746  metricMan->sendMetric("Buffer Depth Fragments", dataBufferDepthFragments_.load(), "fragments", 1, MetricMode::LastPoint);
747  metricMan->sendMetric("Buffer Depth Bytes", dataBufferDepthBytes_.load(), "bytes", 1, MetricMode::LastPoint);
748  }
749  TLOG(TLVL_GETBUFFERSTATS) << "getDataBufferStats: frags=" << dataBufferDepthFragments_.load() << "/" << maxDataBufferDepthFragments_
750  << ", sz=" << dataBufferDepthBytes_.load() << "/" << maxDataBufferDepthBytes_;
751 }
752 
753 void artdaq::CommandableFragmentGenerator::checkDataBuffer()
754 {
755  std::unique_lock<std::mutex> lock(dataBufferMutex_);
756  dataCondition_.wait_for(lock, std::chrono::milliseconds(10));
757  if (dataBufferDepthFragments_ > 0)
758  {
759  if ((mode_ == RequestMode::Buffer || mode_ == RequestMode::Window))
760  {
761  // Eliminate extra fragments
762  while (dataBufferIsTooLarge())
763  {
764  TLOG(TLVL_CHECKDATABUFFER) << "checkDataBuffer: Dropping Fragment with timestamp " << (*dataBuffer_.begin())->timestamp() << " from data buffer (Buffer over-size)";
765  dataBuffer_.erase(dataBuffer_.begin());
766  getDataBufferStats();
767  }
768  if (dataBuffer_.size() > 0)
769  {
770  TLOG(TLVL_CHECKDATABUFFER) << "Determining if Fragments can be dropped from data buffer";
771  Fragment::timestamp_t last = dataBuffer_.back()->timestamp();
772  Fragment::timestamp_t min = last > staleTimeout_ ? last - staleTimeout_ : 0;
773  for (auto it = dataBuffer_.begin(); it != dataBuffer_.end();)
774  {
775  if ((*it)->timestamp() < min)
776  {
777  TLOG(TLVL_CHECKDATABUFFER) << "checkDataBuffer: Dropping Fragment with timestamp " << (*it)->timestamp() << " from data buffer (timeout=" << staleTimeout_ << ", min=" << min << ")";
778  it = dataBuffer_.erase(it);
779  }
780  else
781  {
782  ++it;
783  }
784  }
785  getDataBufferStats();
786  }
787  }
788  else if (mode_ == RequestMode::Single && dataBuffer_.size() > fragment_ids_.size())
789  {
790  // Eliminate extra fragments
791  while (dataBuffer_.size() > fragment_ids_.size())
792  {
793  dataBuffer_.erase(dataBuffer_.begin());
794  }
795  }
796  }
797 }
798 
799 void artdaq::CommandableFragmentGenerator::getMonitoringDataLoop()
800 {
801  while (!force_stop_)
802  {
803  if (should_stop() || monitoringInterval_ <= 0)
804  {
805  TLOG(TLVL_DEBUG) << "getMonitoringDataLoop: should_stop() is " << std::boolalpha << should_stop()
806  << " and monitoringInterval is " << monitoringInterval_ << ", returning";
807  return;
808  }
809  TLOG(TLVL_GETMONITORINGDATA) << "getMonitoringDataLoop: Determining whether to call checkHWStatus_";
810 
811  auto now = std::chrono::steady_clock::now();
812  if (TimeUtils::GetElapsedTimeMicroseconds(lastMonitoringCall_, now) >= static_cast<size_t>(monitoringInterval_))
813  {
814  isHardwareOK_ = checkHWStatus_();
815  TLOG(TLVL_GETMONITORINGDATA) << "getMonitoringDataLoop: isHardwareOK_ is now " << std::boolalpha << isHardwareOK_;
816  lastMonitoringCall_ = now;
817  }
818  usleep(monitoringInterval_ / 10);
819  }
820 }
821 
822 void artdaq::CommandableFragmentGenerator::applyRequestsIgnoredMode(artdaq::FragmentPtrs& frags)
823 {
824  // We just copy everything that's here into the output.
825  TLOG(TLVL_APPLYREQUESTS) << "Mode is Ignored; Copying data to output";
826  std::move(dataBuffer_.begin(), dataBuffer_.end(), std::inserter(frags, frags.end()));
827  dataBuffer_.clear();
828 }
829 
830 void artdaq::CommandableFragmentGenerator::applyRequestsSingleMode(artdaq::FragmentPtrs& frags)
831 {
832  // We only care about the latest request received. Send empties for all others.
833  auto requests = requestReceiver_->GetRequests();
834  while (requests.size() > 1)
835  {
836  // std::map is ordered by key => Last sequence ID in the map is the one we care about
837  requestReceiver_->RemoveRequest(requests.begin()->first);
838  requests.erase(requests.begin());
839  }
840  sendEmptyFragments(frags, requests);
841 
842  // If no requests remain after sendEmptyFragments, return
843  if (requests.size() == 0 || !requests.count(ev_counter())) return;
844 
845  if (dataBuffer_.size() > 0)
846  {
847  TLOG(TLVL_APPLYREQUESTS) << "Mode is Single; Sending copy of last event";
848  for (auto& fragptr : dataBuffer_)
849  {
850  // Return the latest data point
851  auto frag = fragptr.get();
852  auto newfrag = std::unique_ptr<artdaq::Fragment>(new Fragment(ev_counter(), frag->fragmentID()));
853  newfrag->resize(frag->size() - detail::RawFragmentHeader::num_words());
854  memcpy(newfrag->headerAddress(), frag->headerAddress(), frag->sizeBytes());
855  newfrag->setTimestamp(requests[ev_counter()]);
856  newfrag->setSequenceID(ev_counter());
857  frags.push_back(std::move(newfrag));
858  }
859  }
860  else
861  {
862  sendEmptyFragment(frags, ev_counter(), "No data for");
863  }
864  requestReceiver_->RemoveRequest(ev_counter());
865  ev_counter_inc(1, true);
866 }
867 
868 void artdaq::CommandableFragmentGenerator::applyRequestsBufferMode(artdaq::FragmentPtrs& frags)
869 {
870  // We only care about the latest request received. Send empties for all others.
871  auto requests = requestReceiver_->GetRequests();
872  while (requests.size() > 1)
873  {
874  // std::map is ordered by key => Last sequence ID in the map is the one we care about
875  requestReceiver_->RemoveRequest(requests.begin()->first);
876  requests.erase(requests.begin());
877  }
878  sendEmptyFragments(frags, requests);
879 
880  // If no requests remain after sendEmptyFragments, return
881  if (requests.size() == 0 || !requests.count(ev_counter())) return;
882 
883  TLOG(TLVL_DEBUG) << "Creating ContainerFragment for Buffered Fragments";
884  frags.emplace_back(new artdaq::Fragment(ev_counter(), fragment_id()));
885  frags.back()->setTimestamp(requests[ev_counter()]);
886  ContainerFragmentLoader cfl(*frags.back());
887  cfl.set_missing_data(false); // Buffer mode is never missing data, even if there IS no data.
888 
889  // Buffer mode TFGs should simply copy out the whole dataBuffer_ into a ContainerFragment
890  // Window mode TFGs must do a little bit more work to decide which fragments to send for a given request
891  for (auto it = dataBuffer_.begin(); it != dataBuffer_.end();)
892  {
893  TLOG(TLVL_APPLYREQUESTS) << "ApplyRequests: Adding Fragment with timestamp " << (*it)->timestamp() << " to Container with sequence ID " << ev_counter();
894  cfl.addFragment(*it);
895  it = dataBuffer_.erase(it);
896  }
897  requestReceiver_->RemoveRequest(ev_counter());
898  ev_counter_inc(1, true);
899 }
900 
901 void artdaq::CommandableFragmentGenerator::applyRequestsWindowMode(artdaq::FragmentPtrs& frags)
902 {
903  TLOG(TLVL_APPLYREQUESTS) << "applyRequestsWindowMode BEGIN";
904 
905  auto requests = requestReceiver_->GetRequests();
906 
907  TLOG(TLVL_APPLYREQUESTS) << "applyRequestsWindowMode: Starting request processing";
908  for (auto req = requests.begin(); req != requests.end();)
909  {
910  TLOG(TLVL_APPLYREQUESTS) << "applyRequestsWindowMode: processing request with sequence ID " << req->first << ", timestamp " << req->second;
911 
912 
913  while (req->first < ev_counter() && requests.size() > 0)
914  {
915  TLOG(TLVL_APPLYREQUESTS) << "applyRequestsWindowMode: Clearing passed request for sequence ID " << req->first;
916  requestReceiver_->RemoveRequest(req->first);
917  req = requests.erase(req);
918  }
919  if (requests.size() == 0) break;
920 
921  auto ts = req->second;
922  TLOG(TLVL_APPLYREQUESTS) << "applyRequests: Checking that data exists for request window " << req->first;
923  Fragment::timestamp_t min = ts > windowOffset_ ? ts - windowOffset_ : 0;
924  Fragment::timestamp_t max = min + windowWidth_;
925  TLOG(TLVL_APPLYREQUESTS) << "ApplyRequests: min is " << min << ", max is " << max
926  << " and last point in buffer is " << (dataBuffer_.size() > 0 ? dataBuffer_.back()->timestamp() : 0) << " (sz=" << dataBuffer_.size() << ")";
927  bool windowClosed = dataBuffer_.size() > 0 && dataBuffer_.back()->timestamp() >= max;
928  bool windowTimeout = !windowClosed && TimeUtils::GetElapsedTimeMicroseconds(requestReceiver_->GetRequestTime(req->first)) > window_close_timeout_us_;
929  if (windowTimeout)
930  {
931  TLOG(TLVL_WARNING) << "applyRequests: A timeout occurred waiting for data to close the request window ({" << min << "-" << max
932  << "}, buffer={" << (dataBuffer_.size() > 0 ? dataBuffer_.front()->timestamp() : 0) << "-"
933  << (dataBuffer_.size() > 0 ? dataBuffer_.back()->timestamp() : 0)
934  << "} ). Time waiting: "
935  << TimeUtils::GetElapsedTimeMicroseconds(requestReceiver_->GetRequestTime(req->first)) << " us "
936  << "(> " << window_close_timeout_us_ << " us).";
937  }
938  if (windowClosed || !data_thread_running_ || windowTimeout)
939  {
940  TLOG(TLVL_DEBUG) << "applyRequests: Creating ContainerFragment for Window-requested Fragments";
941  frags.emplace_back(new artdaq::Fragment(req->first, fragment_id()));
942  frags.back()->setTimestamp(ts);
943  ContainerFragmentLoader cfl(*frags.back());
944 
945  // In the spirit of NOvA's MegaPool: (RS = Request start (min), RE = Request End (max))
946  // --- | Buffer Start | --- | Buffer End | ---
947  //1. RS RE | | | |
948  //2. RS | | RE | |
949  //3. RS | | | | RE
950  //4. | | RS RE | |
951  //5. | | RS | | RE
952  //6. | | | | RS RE
953  //
954  // If RE (or RS) is after the end of the buffer, we wait for window_close_timeout_us_. If we're here, then that means that windowClosed is false, and the missing_data flag should be set.
955  // If RS (or RE) is before the start of the buffer, then missing_data should be set to true, as data is assumed to arrive in the buffer in timestamp order
956  // If the dataBuffer has size 0, then windowClosed will be false
957  if (!windowClosed || (dataBuffer_.size() > 0 && dataBuffer_.front()->timestamp() > min))
958  {
959  TLOG(TLVL_DEBUG) << "applyRequests: Request window starts before and/or ends after the current data buffer, setting ContainerFragment's missing_data flag!"
960  << " (requestWindowRange=[" << min << "," << max << "], "
961  << "buffer={" << (dataBuffer_.size() > 0 ? dataBuffer_.front()->timestamp() : 0) << "-"
962  << (dataBuffer_.size() > 0 ? dataBuffer_.back()->timestamp() : 0) << "}";
963  cfl.set_missing_data(true);
964  }
965 
966  // Buffer mode TFGs should simply copy out the whole dataBuffer_ into a ContainerFragment
967  // Window mode TFGs must do a little bit more work to decide which fragments to send for a given request
968  for (auto it = dataBuffer_.begin(); it != dataBuffer_.end();)
969  {
970  Fragment::timestamp_t fragT = (*it)->timestamp();
971  if (fragT < min || fragT > max || (fragT == max && windowWidth_ > 0))
972  {
973  ++it;
974  continue;
975  }
976 
977  TLOG(TLVL_APPLYREQUESTS) << "applyRequests: Adding Fragment with timestamp " << (*it)->timestamp() << " to Container";
978  cfl.addFragment(*it);
979 
980  if (uniqueWindows_)
981  {
982  it = dataBuffer_.erase(it);
983  }
984  else
985  {
986  ++it;
987  }
988  }
989  requestReceiver_->RemoveRequest(req->first);
990  checkOutOfOrderWindows(req->first);
991  requestReceiver_->RemoveRequest(req->first);
992  req = requests.erase(req);
993  }
994  else
995  {
996  ++req;
997  }
998  }
999 }
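// Illustrative note (not part of this file): a worked example of the window arithmetic above,
// assuming request_window_offset = 2 and request_window_width = 5. A request with timestamp
// ts = 100 gives min = 100 - 2 = 98 and max = 98 + 5 = 103; buffered Fragments with timestamps
// 98 through 102 are added to the ContainerFragment (the point fragT == max is excluded
// whenever windowWidth_ > 0), and the window counts as closed once the newest buffered
// Fragment has a timestamp >= 103.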
1000 
1001 bool artdaq::CommandableFragmentGenerator::applyRequests(artdaq::FragmentPtrs& frags)
1002 {
1003  if (check_stop() || exception())
1004  {
1005  return false;
1006  }
1007 
1008  // Wait for data, if in ignored mode, or a request otherwise
1009  if (mode_ == RequestMode::Ignored)
1010  {
1011  while (dataBufferDepthFragments_ <= 0)
1012  {
1013  if (check_stop() || exception() || !isHardwareOK_) return false;
1014  std::unique_lock<std::mutex> lock(dataBufferMutex_);
1015  dataCondition_.wait_for(lock, std::chrono::milliseconds(10), [this]() { return dataBufferDepthFragments_ > 0; });
1016  }
1017  }
1018  else
1019  {
1020  if ((check_stop() && requestReceiver_->size() == 0) || exception()) return false;
1021  checkDataBuffer();
1022 
1023  // Wait up to 1000 ms for a request...
1024  auto counter = 0;
1025 
1026  while (requestReceiver_->size() == 0 && counter < 100)
1027  {
1028  if (check_stop() || exception()) return false;
1029 
1030  checkDataBuffer();
1031 
1032  requestReceiver_->WaitForRequests(10); // milliseconds
1033  counter++;
1034  }
1035  }
1036 
1037  {
1038  std::unique_lock<std::mutex> dlk(dataBufferMutex_);
1039 
1040  switch (mode_)
1041  {
1042  case RequestMode::Single:
1043  applyRequestsSingleMode(frags);
1044  break;
1045  case RequestMode::Window:
1046  applyRequestsWindowMode(frags);
1047  break;
1048  case RequestMode::Buffer:
1049  applyRequestsBufferMode(frags);
1050  break;
1051  case RequestMode::Ignored:
1052  default:
1053  applyRequestsIgnoredMode(frags);
1054  break;
1055  }
1056 
1057  if (!data_thread_running_ || force_stop_)
1058  {
1059  TLOG(TLVL_INFO) << "Data thread has stopped; Clearing data buffer";
1060  dataBuffer_.clear();
1061  }
1062 
1063  getDataBufferStats();
1064  }
1065 
1066  if (frags.size() > 0)
1067  TLOG(TLVL_APPLYREQUESTS) << "Finished Processing Event " << (*frags.begin())->sequenceID() << " for fragment_id " << fragment_id() << ".";
1068  return true;
1069 }
1070 
1071 bool artdaq::CommandableFragmentGenerator::sendEmptyFragment(artdaq::FragmentPtrs& frags, size_t seqId, std::string desc)
1072 {
1073  TLOG(TLVL_WARNING) << desc << " sequence ID " << seqId << ", sending empty fragment";
1074  for (auto fid : fragment_ids_)
1075  {
1076  auto frag = new Fragment();
1077  frag->setSequenceID(seqId);
1078  frag->setFragmentID(fid);
1079  frag->setSystemType(Fragment::EmptyFragmentType);
1080  frags.emplace_back(FragmentPtr(frag));
1081  }
1082  return true;
1083 }
1084 
1085 void artdaq::CommandableFragmentGenerator::sendEmptyFragments(artdaq::FragmentPtrs& frags, std::map<Fragment::sequence_id_t, Fragment::timestamp_t>& requests)
1086 {
1087  if (requests.size() > 0)
1088  {
1089  TLOG(TLVL_SENDEMPTYFRAGMENTS) << "Sending Empty Fragments for Sequence IDs from " << ev_counter() << " up to but not including " << requests.begin()->first;
1090  while (requests.begin()->first > ev_counter())
1091  {
1092  sendEmptyFragment(frags, ev_counter(), "Missed request for");
1093  ev_counter_inc(1, true);
1094  }
1095  }
1096 }
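// Illustrative note (not part of this file): for example, if ev_counter() is 5 and the oldest
// pending request is for sequence ID 8, the loop above emits EmptyFragmentType Fragments for
// sequence IDs 5, 6 and 7 (one per configured fragment ID) and advances ev_counter() to 8,
// so the real request can then be serviced.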
1097 
1098 void artdaq::CommandableFragmentGenerator::checkOutOfOrderWindows(artdaq::Fragment::sequence_id_t seq)
1099 {
1100  windows_sent_ooo_[seq] = std::chrono::steady_clock::now();
1101 
1102  auto it = windows_sent_ooo_.begin();
1103  while (it != windows_sent_ooo_.end())
1104  {
1105  if (seq == it->first && it->first == ev_counter())
1106  {
1107  TLOG(TLVL_CHECKWINDOWS) << "checkOutOfOrderWindows: Sequence ID matches ev_counter, incrementing ev_counter (" << ev_counter() << ")";
1108  ev_counter_inc(1, true);
1109  it = windows_sent_ooo_.erase(it);
1110  }
1111  else if (it->first <= ev_counter())
1112  {
1113  TLOG(TLVL_CHECKWINDOWS) << "checkOutOfOrderWindows: Data-taking has caught up to out-of-order window request " << it->first << ", removing from list. ev_counter=" << ev_counter();
1114  requestReceiver_->RemoveRequest(ev_counter());
1115  if (it->first == ev_counter()) ev_counter_inc(1, true);
1116  it = windows_sent_ooo_.erase(it);
1117  }
1118  else if (TimeUtils::GetElapsedTimeMicroseconds(it->second) > missing_request_window_timeout_us_)
1119  {
1120  TLOG(TLVL_CHECKWINDOWS) << "checkOutOfOrderWindows: Out-of-order window " << it->first << " has timed out, setting current sequence ID and removing from list";
1121  while (ev_counter() <= it->first)
1122  {
1123  if (ev_counter() < it->first) TLOG(TLVL_WARNING) << "Missed request for sequence ID " << ev_counter() << "! Will not send any data for this sequence ID!";
1124  requestReceiver_->RemoveRequest(ev_counter());
1125  ev_counter_inc(1, true);
1126  }
1127  windows_sent_ooo_.erase(windows_sent_ooo_.begin(), it);
1128  it = windows_sent_ooo_.erase(it);
1129  }
1130  else
1131  {
1132  TLOG(TLVL_CHECKWINDOWS) << "checkOutOfOrderWindows: Out-of-order window " << it->first << " waiting. Current event counter = " << ev_counter();
1133  ++it;
1134  }
1135  }
1136 }
1137 