11 #include "canvas/Utilities/Exception.h"
12 #include "cetlib_except/exception.h"
14 #include "artdaq/DAQdata/Globals.hh"
15 #define TRACE_NAME (app_name + "_RoutingManagerCore").c_str() // before trace.h
16 #include "artdaq-core/Data/Fragment.hh"
17 #include "artdaq-core/Utilities/ExceptionHandler.hh"
19 #include "artdaq/Application/RoutingManagerCore.hh"
22 #include "artdaq/RoutingPolicies/makeRoutingManagerPolicy.hh"
// RoutingManagerCore constructor
artdaq::RoutingManagerCore::RoutingManagerCore()
    : shutdown_requested_(false)
    , stop_requested_(true)
    , pause_requested_(false)
{
	TLOG(TLVL_DEBUG) << "Constructor";
}
// RoutingManagerCore destructor
artdaq::RoutingManagerCore::~RoutingManagerCore()
{
	TLOG(TLVL_DEBUG) << "Destructor";
	artdaq::StatisticsCollection::getInstance().requestStop();
	token_receiver_->stopTokenReception(true);
}
// RoutingManagerCore::initialize(fhicl::ParameterSet const& pset, uint64_t, uint64_t)
	TLOG(TLVL_DEBUG) << "initialize method called with "
	                 << "ParameterSet = \"" << pset.to_string() << "\".";

	// Pull out the DAQ portion of the ParameterSet
	fhicl::ParameterSet daq_pset;
	try
	{
		daq_pset = pset.get<fhicl::ParameterSet>("daq");
	}
	catch (...)
	{
		TLOG(TLVL_ERROR)
		    << "Unable to find the DAQ parameters in the initialization "
		    << "ParameterSet: \"" + pset.to_string() + "\".";
		return false;
	}
	if (daq_pset.has_key("rank"))
	{
		if (my_rank >= 0 && daq_pset.get<int>("rank") != my_rank)
		{
			TLOG(TLVL_WARNING) << "Routing Manager rank specified at startup is different than rank specified at configure! Using rank received at configure!";
		}
		my_rank = daq_pset.get<int>("rank");
	}
	if (my_rank < 0)
	{
		TLOG(TLVL_ERROR) << "Routing Manager rank not specified at startup or in configuration! Aborting";
		return false;
	}
	try
	{
		policy_pset_ = daq_pset.get<fhicl::ParameterSet>("policy");
	}
	catch (...)
	{
		TLOG(TLVL_ERROR) << "Unable to find the policy parameters in the DAQ initialization ParameterSet: \"" + daq_pset.to_string() + "\".";
		return false;
	}

	try
	{
		token_receiver_pset_ = daq_pset.get<fhicl::ParameterSet>("token_receiver");
	}
	catch (...)
	{
		TLOG(TLVL_ERROR) << "Unable to find the token_receiver parameters in the DAQ initialization ParameterSet: \"" + daq_pset.to_string() + "\".";
		return false;
	}
	fhicl::ParameterSet metric_pset;
	try
	{
		metric_pset = daq_pset.get<fhicl::ParameterSet>("metrics");
	}
	catch (...)
	{}  // OK if there is no metrics block in the FHiCL
	if (metric_pset.is_empty())
	{
		TLOG(TLVL_INFO) << "No metric plugins appear to be defined";
	}
	try
	{
		metricMan->initialize(metric_pset, app_name);
	}
	catch (...)
	{
		ExceptionHandler(ExceptionHandlerRethrow::no, "Error loading metrics in RoutingManagerCore::initialize()");
	}
	// Create the requested RoutingManagerPolicy plugin
	auto policy_plugin_spec = policy_pset_.get<std::string>("policy", "");
	if (policy_plugin_spec.length() == 0)
	{
		TLOG(TLVL_ERROR)
		    << "No fragment generator (parameter name = \"policy\") was "
		    << "specified in the policy ParameterSet. The "
		    << "DAQ initialization PSet was \"" << daq_pset.to_string() << "\".";
		return false;
	}
	try
	{
		policy_ = makeRoutingManagerPolicy(policy_plugin_spec, policy_pset_);
	}
	catch (...)
	{
		std::stringstream exception_string;
		exception_string << "Exception thrown during initialization of policy of type \""
		                 << policy_plugin_spec << "\"";
		ExceptionHandler(ExceptionHandlerRethrow::no, exception_string.str());
		TLOG(TLVL_DEBUG) << "FHiCL parameter set used to initialize the policy which threw an exception: " << policy_pset_.to_string();
		return false;
	}
	rt_priority_ = daq_pset.get<int>("rt_priority", 0);
	sender_ranks_ = daq_pset.get<std::vector<int>>("sender_ranks");

	receive_ack_events_ = std::vector<epoll_event>(sender_ranks_.size());

	auto mode = daq_pset.get<bool>("senders_send_by_send_count", false);

	max_table_update_interval_ms_ = daq_pset.get<size_t>("table_update_interval_ms", 1000);
	current_table_interval_ms_ = max_table_update_interval_ms_;
	max_ack_cycle_count_ = daq_pset.get<size_t>("table_ack_retry_count", 5);
	send_tables_port_ = daq_pset.get<int>("table_update_port", 35556);
	receive_acks_port_ = daq_pset.get<int>("table_acknowledge_port", 35557);
	send_tables_address_ = daq_pset.get<std::string>("table_update_address", "227.128.12.28");
	multicast_out_hostname_ = daq_pset.get<std::string>("routing_manager_hostname", "localhost");

	statsHelperPtr_->createCollectors(daq_pset, 100, 30.0, 60.0, TABLE_UPDATES_STAT_KEY);
	// Create the TokenReceiver; its reception thread starts paused until start()
	token_receiver_ = std::make_unique<TokenReceiver>(token_receiver_pset_, policy_, routing_mode_, sender_ranks_.size(), max_table_update_interval_ms_);
	token_receiver_->setStatsHelper(statsHelperPtr_, TOKENS_RECEIVED_STAT_KEY);
	token_receiver_->startTokenReception();
	token_receiver_->pauseTokenReception();

	shutdown_requested_.store(false);
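
	// Illustrative sketch (an assumption, not taken from this file): a minimal "daq"
	// FHiCL block exercising the parameters read by initialize() above. The rank,
	// sender_ranks, "NoOp" policy name, and the empty token_receiver/metrics tables
	// are placeholder values; the remaining values are the defaults shown above.
	//
	//   daq: {
	//     rank: 0
	//     sender_ranks: [5, 6, 7]
	//     senders_send_by_send_count: false
	//     rt_priority: 0
	//     table_update_interval_ms: 1000
	//     table_ack_retry_count: 5
	//     table_update_port: 35556
	//     table_acknowledge_port: 35557
	//     table_update_address: "227.128.12.28"
	//     routing_manager_hostname: "localhost"
	//     policy: { policy: "NoOp" }
	//     token_receiver: {}
	//     metrics: {}
	//   }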
// RoutingManagerCore::start(art::RunID id, uint64_t, uint64_t)
	for (auto& rank : sender_ranks_)
	{
		if (active_ranks_.count(rank) == 0u)
		{
			active_ranks_.insert(rank);
		}
	}

	stop_requested_.store(false);
	pause_requested_.store(false);

	statsHelperPtr_->resetStatistics();

	metricMan->do_start();
	table_update_count_ = 0;
	token_receiver_->setRunNumber(run_id_.run());
	token_receiver_->resumeTokenReception();

	TLOG(TLVL_INFO) << "Started run " << run_id_.run();
// RoutingManagerCore::stop(uint64_t, uint64_t)
	TLOG(TLVL_INFO) << "Stopping run " << run_id_.run()
	                << " after " << table_update_count_ << " table updates."
	                << " and " << token_receiver_->getReceivedTokenCount() << " received tokens.";
	stop_requested_.store(true);
	token_receiver_->pauseTokenReception();
	run_id_ = art::RunID::flushRun();
// RoutingManagerCore::pause(uint64_t, uint64_t)
	TLOG(TLVL_INFO) << "Pausing run " << run_id_.run()
	                << " after " << table_update_count_ << " table updates."
	                << " and " << token_receiver_->getReceivedTokenCount() << " received tokens.";
	pause_requested_.store(true);
// RoutingManagerCore::resume(uint64_t, uint64_t)
	TLOG(TLVL_DEBUG) << "Resuming run " << run_id_.run();
	pause_requested_.store(false);
	metricMan->do_start();
// RoutingManagerCore::shutdown(uint64_t)
	shutdown_requested_.store(true);
	token_receiver_->stopTokenReception();
	metricMan->shutdown();
// RoutingManagerCore::soft_initialize(fhicl::ParameterSet const& pset, uint64_t e, uint64_t f)
	TLOG(TLVL_INFO) << "soft_initialize method called with "
	                << "ParameterSet = \"" << pset.to_string() << "\".";
	return initialize(pset, e, f);
// RoutingManagerCore::reinitialize(fhicl::ParameterSet const& pset, uint64_t e, uint64_t f)
	TLOG(TLVL_INFO) << "reinitialize method called with "
	                << "ParameterSet = \"" << pset.to_string() << "\".";
	return initialize(pset, e, f);
// RoutingManagerCore::process_event_table()
	if (rt_priority_ > 0)
	{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
		sched_param s_param = {};
		s_param.sched_priority = rt_priority_;
		if (pthread_setschedparam(pthread_self(), SCHED_RR, &s_param) != 0)
		{
			TLOG(TLVL_WARNING) << "setting realtime priority failed";
		}
#pragma GCC diagnostic pop
	}
	if (rt_priority_ > 0)
	{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
		sched_param s_param = {};
		s_param.sched_priority = rt_priority_;
		int status = pthread_setschedparam(pthread_self(), SCHED_RR, &s_param);
		if (status != 0)
		{
			TLOG(TLVL_ERROR)
			    << "Failed to set realtime priority to " << rt_priority_
			    << ", return code = " << status;
		}
#pragma GCC diagnostic pop
	}
	TLOG(TLVL_DEBUG) << "Sending initial table.";
	auto startTime = artdaq::MonitoredQuantity::getCurrentTime();
	auto nextSendTime = startTime;
	double delta_time;
	while (!stop_requested_ && !pause_requested_)
	{
		startTime = artdaq::MonitoredQuantity::getCurrentTime();

		if (startTime >= nextSendTime)
		{
			auto table = policy_->GetCurrentTable();
			if (!table.empty())
			{
				send_event_table(table);
				++table_update_count_;
				delta_time = artdaq::MonitoredQuantity::getCurrentTime() - startTime;
				statsHelperPtr_->addSample(TABLE_UPDATES_STAT_KEY, delta_time);
				TLOG(16) << "process_fragments TABLE_UPDATES_STAT_KEY=" << delta_time;

				bool readyToReport = statsHelperPtr_->readyToReport();
				if (readyToReport)
				{
					std::string statString = buildStatisticsString_();
					TLOG(TLVL_INFO) << statString;
				}
			}
			else
			{
				TLOG(TLVL_TRACE) << "No tokens received in this update interval (" << current_table_interval_ms_ << " ms)! This most likely means that the receivers are not keeping up!";
			}

			// Adapt the update interval to how full the table is relative to the
			// maximum number of outstanding tokens, clamped to [1, table_update_interval_ms]
			auto max_tokens = policy_->GetMaxNumberOfTokens();
			auto frac = table.size() / static_cast<double>(max_tokens);
			if (frac > 0.75) current_table_interval_ms_ = 9 * current_table_interval_ms_ / 10;
			if (frac < 0.5) current_table_interval_ms_ = 11 * current_table_interval_ms_ / 10;
			if (current_table_interval_ms_ > max_table_update_interval_ms_) current_table_interval_ms_ = max_table_update_interval_ms_;
			if (current_table_interval_ms_ < 1) current_table_interval_ms_ = 1;

			nextSendTime = startTime + current_table_interval_ms_ / 1000.0;
			TLOG(TLVL_TRACE) << "current_table_interval_ms is now " << current_table_interval_ms_;
		}
		else
		{
			usleep(current_table_interval_ms_ * 10);  // sleep for ~1% of the table update interval
		}
	}
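
	// Illustrative arithmetic (assumed default values, not from this file): starting from
	// table_update_interval_ms = 1000, consecutive passes through the loop above with a
	// relatively full table shrink the interval to 900, 810, 729 ms, ..., while passes with
	// a relatively empty table grow it by 10% per pass; the result is always clamped to the
	// range [1 ms, table_update_interval_ms].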
	// Drain any remaining ack datagrams when stopping
	if (stop_requested_ && ack_socket_ != -1)
	{
		TLOG(TLVL_INFO) << "Shutting down Routing Manager: Draining ack socket BEGIN";
		detail::RoutingAckPacket buffer;
		while (recvfrom(ack_socket_, &buffer, sizeof(detail::RoutingAckPacket), MSG_DONTWAIT, nullptr, nullptr) > 0) {}
		if (errno == EWOULDBLOCK || errno == EAGAIN)
		{
			TLOG(20) << "No more ack datagrams on ack socket.";
		}
		else
		{
			TLOG(TLVL_ERROR) << "An unexpected error occurred during ack packet receive";
		}
		TLOG(TLVL_INFO) << "Shutting down Routing Manager: Draining ack socket END";
	}

	TLOG(TLVL_DEBUG) << "stop_requested_ is " << stop_requested_ << ", pause_requested_ is " << pause_requested_
	                 << ", exiting process_event_table loop";
	metricMan->do_stop();
// RoutingManagerCore::send_event_table(detail::RoutingPacket packet)
	// Reconnect table socket, if necessary
	if (table_socket_ == -1)
	{
		table_socket_ = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
		if (table_socket_ < 0)
		{
			TLOG(TLVL_ERROR) << "I failed to create the socket for sending Data Requests! Errno: " << errno;
		}
		auto sts = ResolveHost(send_tables_address_.c_str(), send_tables_port_, send_tables_addr_);
		if (sts == -1)
		{
			TLOG(TLVL_ERROR) << "Unable to resolve table_update_address";
		}
		struct in_addr addr{};
		if (multicast_out_hostname_ != "localhost")
		{
			TLOG(TLVL_DEBUG) << "Making sure that multicast sending uses the correct interface for hostname " << multicast_out_hostname_;
			sts = GetInterfaceForNetwork(multicast_out_hostname_.c_str(), addr);
			if (sts == -1)
			{
				throw art::Exception(art::errors::Configuration) << "RoutingManagerCore: Unable to determine the multicast interface address from the routing_manager_address parameter value of " << multicast_out_hostname_ << std::endl;
			}
			char addr_str[INET_ADDRSTRLEN];
			inet_ntop(AF_INET, &(addr), addr_str, INET_ADDRSTRLEN);
			TLOG(TLVL_INFO) << "Successfully determined the multicast interface address for " << multicast_out_hostname_
			                << ": " << addr_str << " (RoutingManager sending table updates to BoardReaders)";
		}
		int yes = 1;
		if (setsockopt(table_socket_, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0)
		{
			TLOG(TLVL_ERROR) << "RoutingManagerCore: Unable to enable port reuse on table update socket";
			throw art::Exception(art::errors::Configuration) << "RoutingManagerCore: Unable to enable port reuse on table update socket" << std::endl;
		}
		if (setsockopt(table_socket_, IPPROTO_IP, IP_MULTICAST_LOOP, &yes, sizeof(yes)) < 0)
		{
			TLOG(TLVL_ERROR) << "Unable to enable multicast loopback on table socket";
		}
		if (setsockopt(table_socket_, IPPROTO_IP, IP_MULTICAST_IF, &addr, sizeof(addr)) == -1)
		{
			TLOG(TLVL_ERROR) << "Cannot set outgoing interface. Errno: " << errno;
		}
		if (setsockopt(table_socket_, SOL_SOCKET, SO_BROADCAST, &yes, sizeof(yes)) == -1)
		{
			TLOG(TLVL_ERROR) << "Cannot set request socket to broadcast. Errno: " << errno;
		}
	}
	// Create the ack socket, if necessary
	if (ack_socket_ == -1)
	{
		ack_socket_ = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
		if (ack_socket_ < 0)
		{
			throw art::Exception(art::errors::Configuration) << "RoutingManagerCore: Error creating socket for receiving table update acks!" << std::endl;
		}

		struct sockaddr_in si_me_request;

		int yes = 1;
		if (setsockopt(ack_socket_, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0)
		{
			TLOG(TLVL_ERROR) << "RoutingManagerCore: Unable to enable port reuse on ack socket. errno=" << errno;
			throw art::Exception(art::errors::Configuration) << "RoutingManagerCore: Unable to enable port reuse on ack socket" << std::endl;
		}

		int len = 0;
		socklen_t arglen = sizeof(len);
		auto sts = getsockopt(ack_socket_, SOL_SOCKET, SO_RCVBUF, &len, &arglen);
		TLOG(TLVL_INFO) << "ACK RCVBUF initial: " << len << " sts/errno=" << sts << "/" << errno << " arglen=" << arglen;

		memset(&si_me_request, 0, sizeof(si_me_request));
		si_me_request.sin_family = AF_INET;
		si_me_request.sin_port = htons(receive_acks_port_);
		si_me_request.sin_addr.s_addr = htonl(INADDR_ANY);
		if (bind(ack_socket_, reinterpret_cast<struct sockaddr*>(&si_me_request), sizeof(si_me_request)) == -1)
		{
			TLOG(TLVL_ERROR) << "RoutingManagerCore: Cannot bind request socket to port " << receive_acks_port_ << ", errno=" << errno;
			throw art::Exception(art::errors::Configuration) << "RoutingManagerCore: Cannot bind request socket to port " << receive_acks_port_ << std::endl;
		}
		TLOG(TLVL_DEBUG) << "Listening for acks on 0.0.0.0 port " << receive_acks_port_;
	}
	// Track which active senders have acknowledged this table update
	auto acks = std::unordered_map<int, bool>();
	for (auto& r : active_ranks_)
	{
		acks[r] = false;
	}

	size_t counter = 0;
	auto start_time = std::chrono::steady_clock::now();
	// Keep (re)sending the table until every active sender has acknowledged it or a stop is requested
	while (std::count_if(acks.begin(), acks.end(), [](std::pair<int, bool> p) { return !p.second; }) > 0 && !stop_requested_)
	{
		// A detail::RoutingPacketHeader `header` describing the packet.size() entries of this
		// table update and the payload byte count `packetSize` are assembled at this point.
		// Ranks that have already acknowledged are recorded in the header's bitset.
		for (auto& ack : acks)
		{
			TLOG(27) << "Table update already acknowledged? rank " << ack.first << " is " << ack.second
			         << " (size of 'already_acknowledged_ranks bitset is " << (8 * sizeof(header.already_acknowledged_ranks)) << ")";
			if (ack.first < static_cast<int>(8 * sizeof(header.already_acknowledged_ranks)))
			{
				// (set the bit corresponding to ack.first when ack.second is true)
			}
		}

		assert(packetSize + sizeof(header) < MAX_ROUTING_TABLE_SIZE);
		std::vector<uint8_t> buffer(packetSize + sizeof(header));
		// Copy the header, then the routing table entries, into the datagram buffer
		memcpy(&buffer[0], &header, sizeof(header));
		memcpy(&buffer[sizeof(header)], &packet[0], packetSize);

		TLOG(TLVL_DEBUG) << "Sending table information for " << header.nEntries << " events to multicast group " << send_tables_address_
		                 << ", port " << send_tables_port_ << ", outgoing interface " << multicast_out_hostname_;
		TRACE(16, "headerData:0x%016lx%016lx packetData:0x%016lx%016lx", ((unsigned long*)&header)[0], ((unsigned long*)&header)[1], ((unsigned long*)&packet[0])[0], ((unsigned long*)&packet[0])[1]);
		auto sts = sendto(table_socket_, &buffer[0], buffer.size(), 0, reinterpret_cast<struct sockaddr*>(&send_tables_addr_), sizeof(send_tables_addr_));
		if (sts != static_cast<ssize_t>(buffer.size()))
		{
			TLOG(TLVL_ERROR) << "Error sending routing table. sts=" << sts;
		}

		// The expected acknowledgement bounds are the first and last sequence IDs of this table
		auto first = packet[0].sequence_id;
		auto last = packet.rbegin()->sequence_id;
		TLOG(TLVL_DEBUG) << "Sent " << sts << " bytes. Expecting acks to have first= " << first << ", and last= " << last;
		auto startTime = std::chrono::steady_clock::now();
		while (std::count_if(acks.begin(), acks.end(), [](std::pair<int, bool> p) { return !p.second; }) > 0)
		{
			auto table_ack_wait_time_ms = current_table_interval_ms_ / max_ack_cycle_count_;
			if (TimeUtils::GetElapsedTimeMilliseconds(startTime) > table_ack_wait_time_ms)
			{
				if (++counter > max_ack_cycle_count_ && table_update_count_ > 0)
				{
					TLOG(TLVL_WARNING) << "Did not receive acks from all senders after resending table " << counter
					                   << " times during the table_update_interval. Check the status of the senders!";
				}
				else
				{
					TLOG(TLVL_WARNING) << "Did not receive acks from all senders within the timeout (" << table_ack_wait_time_ms << " ms). Resending table update";
				}

				if (std::count_if(acks.begin(), acks.end(), [](std::pair<int, bool> p) { return !p.second; }) <= 3)
				{
					auto ackIter = acks.begin();
					while (ackIter != acks.end())
					{
						if (!ackIter->second)
						{
							TLOG(TLVL_TRACE) << "Did not receive ack from rank " << ackIter->first;
						}
						++ackIter;
					}
				}
				break;  // Return to the outer loop so that the table is resent
			}
			TLOG(20) << "send_event_table: Polling Request socket for new requests";
			detail::RoutingAckPacket buffer;
			auto recv_sts = recvfrom(ack_socket_, &buffer, sizeof(detail::RoutingAckPacket), MSG_DONTWAIT, nullptr, nullptr);
			if (recv_sts < 0)
			{
				if (errno == EWOULDBLOCK || errno == EAGAIN)
				{
					TLOG(20) << "send_event_table: No more ack datagrams on ack socket.";
				}
				else
				{
					TLOG(TLVL_ERROR) << "An unexpected error occurred during ack packet receive";
				}
			}
			else
			{
				TLOG(TLVL_DEBUG) << "Ack packet from rank " << buffer.rank << " has first= " << buffer.first_sequence_id
				                 << " and last= " << buffer.last_sequence_id;
				if ((acks.count(buffer.rank) != 0u) && buffer.first_sequence_id == first && buffer.last_sequence_id == last)
				{
					TLOG(TLVL_DEBUG) << "Received table update acknowledgement from sender with rank " << buffer.rank << ".";
					acks[buffer.rank] = true;
					TLOG(TLVL_DEBUG) << "There are now " << std::count_if(acks.begin(), acks.end(), [](std::pair<int, bool> p) { return !p.second; })
					                 << " acks outstanding";
				}
				else if ((acks.count(buffer.rank) != 0u) && detail::RoutingAckPacket::isEndOfDataRoutingAckPacket(buffer))
				{
					TLOG(TLVL_INFO) << "Received table update acknowledgement indicating end-of-data from rank " << buffer.rank << ".";
					acks[buffer.rank] = true;
					active_ranks_.erase(buffer.rank);
				}
				else
				{
					if (acks.count(buffer.rank) == 0u)
					{
						TLOG(TLVL_ERROR) << "Received acknowledgement from invalid rank " << buffer.rank << "!"
						                 << " Cross-talk between RoutingManagers means there's a configuration error!";
					}
					else
					{
						TLOG(TLVL_WARNING) << "Received acknowledgement from rank " << buffer.rank
						                   << " that had incorrect sequence ID information. Discarding."
						                   << " Expected first/last=" << first << "/" << last;
					}
				}
			}
			usleep(table_ack_wait_time_ms * 1000 / 10);
		}
	}

	artdaq::TimeUtils::seconds delta = std::chrono::steady_clock::now() - start_time;
	metricMan->sendMetric("Avg Table Acknowledge Time", delta.count(), "seconds", 3, MetricMode::Average);
// RoutingManagerCore::report(std::string const&) const
	std::string resultString;

	auto tmpString = app_name + " run number = " + std::to_string(run_id_.run()) +
	                 ", table updates sent = " + std::to_string(table_update_count_) +
	                 ", Receiver tokens received = " + std::to_string(token_receiver_->getReceivedTokenCount());
std::string artdaq::RoutingManagerCore::buildStatisticsString_() const
{
	std::ostringstream oss;
	oss << app_name << " statistics:" << std::endl;

	auto mqPtr = artdaq::StatisticsCollection::getInstance().getMonitoredQuantity(TABLE_UPDATES_STAT_KEY);
	if (mqPtr != nullptr)
	{
		artdaq::MonitoredQuantityStats stats;
		mqPtr->getStats(stats);
		oss << "  Table Update statistics: "
		    << stats.recentSampleCount << " table updates sent at "
		    << stats.recentSampleRate << " table updates/sec, , monitor window = "
		    << stats.recentDuration << " sec" << std::endl;
		oss << "  Average times per table update: ";
		if (stats.recentSampleRate > 0.0)
		{
			oss << "  elapsed time = "
			    << (1.0 / stats.recentSampleRate) << " sec";
		}
		oss << ", avg table acknowledgement wait time = "
		    << (mqPtr->getRecentValueSum() / sender_ranks_.size()) << " sec" << std::endl;
	}
	mqPtr = artdaq::StatisticsCollection::getInstance().getMonitoredQuantity(TOKENS_RECEIVED_STAT_KEY);
	if (mqPtr != nullptr)
	{
		artdaq::MonitoredQuantityStats stats;
		mqPtr->getStats(stats);
		oss << "  Received Token statistics: "
		    << stats.recentSampleCount << " tokens received at "
		    << stats.recentSampleRate << " tokens/sec, , monitor window = "
		    << stats.recentDuration << " sec" << std::endl;
		oss << "  Average times per token: ";
		if (stats.recentSampleRate > 0.0)
		{
			oss << "  elapsed time = "
			    << (1.0 / stats.recentSampleRate) << " sec";
		}
		oss << ", input token wait time = "
		    << mqPtr->getRecentValueSum() << " sec" << std::endl;
	}

	return oss.str();
}
void artdaq::RoutingManagerCore::sendMetrics_()
{
	auto mqPtr = artdaq::StatisticsCollection::getInstance().getMonitoredQuantity(TABLE_UPDATES_STAT_KEY);
	if (mqPtr != nullptr)
	{
		artdaq::MonitoredQuantityStats stats;
		mqPtr->getStats(stats);
		metricMan->sendMetric("Table Update Count", stats.fullSampleCount, "updates", 1, MetricMode::LastPoint);
		metricMan->sendMetric("Table Update Rate", stats.recentSampleRate, "updates/sec", 1, MetricMode::Average);
		metricMan->sendMetric("Average Sender Acknowledgement Time", (mqPtr->getRecentValueSum() / sender_ranks_.size()), "seconds", 3, MetricMode::Average);
	}

	mqPtr = artdaq::StatisticsCollection::getInstance().getMonitoredQuantity(TOKENS_RECEIVED_STAT_KEY);
	if (mqPtr != nullptr)
	{
		artdaq::MonitoredQuantityStats stats;
		mqPtr->getStats(stats);
		metricMan->sendMetric("Receiver Token Count", stats.fullSampleCount, "updates", 1, MetricMode::LastPoint);
		metricMan->sendMetric("Receiver Token Rate", stats.recentSampleRate, "updates/sec", 1, MetricMode::Average);
		metricMan->sendMetric("Total Receiver Token Wait Time", mqPtr->getRecentValueSum(), "seconds", 3, MetricMode::Average);
	}
}