#define TRACE_NAME "routing_master_t"

#include "artdaq/DAQrate/detail/RoutingPacket.hh"
#include "artdaq/DAQdata/TCPConnect.hh"
#include "cetlib/filepath_maker.h"
#include "fhiclcpp/ParameterSet.h"
#include "fhiclcpp/make_ParameterSet.h"
#include <boost/program_options.hpp>
#include "artdaq/Application/RoutingMasterCore.hh"
#include "artdaq/Application/RoutingMasterApp.hh"

namespace bpo = boost::program_options;

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <sys/resource.h>
#include <unistd.h>
#include <mpi.h>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
class RoutingMasterTest
{
public:
	RoutingMasterTest(int argc, char* argv[]);

	void go();
	void generate_tokens();
	void table_receiver();
	void routing_master();

	fhicl::ParameterSet getPset(int argc, char* argv[]) const;

private:
	enum class TestRole_t : int { TOKEN_GEN, ROUTING_MASTER, TABLE_RECEIVER };

	void printHost(const std::string& functionName) const;

	fhicl::ParameterSet const pset_;
	fhicl::ParameterSet const daq_pset_;
	TestRole_t role_;

	std::string routing_master_address_;
	std::string multicast_address_;
	int token_port_;
	int table_port_;
	int ack_port_;
	std::vector<int> eb_ranks_;
	int token_count_;
	size_t token_interval_us_;
};
RoutingMasterTest::RoutingMasterTest(int argc, char* argv[])
	: pset_(getPset(argc, argv))
	, daq_pset_(pset_.get<fhicl::ParameterSet>("daq"))
	, routing_master_address_(daq_pset_.get<std::string>("routing_master_hostname", "localhost"))
	, multicast_address_(daq_pset_.get<std::string>("table_update_address", "227.128.12.28"))
	, token_port_(daq_pset_.get<int>("routing_token_port", 35555))
	, table_port_(daq_pset_.get<int>("table_update_port", 35556))
	, ack_port_(daq_pset_.get<int>("table_acknowledge_port", 35557))
	, token_count_(pset_.get<int>("token_count", 1000))
	, token_interval_us_(pset_.get<size_t>("token_interval_us", 5000))
{
	assert(!(my_rank < 0));

	// One of the following roles is assigned to this process (selection logic not shown):
	role_ = TestRole_t::TOKEN_GEN;
	// ...
	role_ = TestRole_t::ROUTING_MASTER;
	// ...
	role_ = TestRole_t::TABLE_RECEIVER;

	auto policy_pset = daq_pset_.get<fhicl::ParameterSet>("policy");
	eb_ranks_ = policy_pset.get<std::vector<int>>("receiver_ranks");
}
fhicl::ParameterSet RoutingMasterTest::getPset(int argc, char* argv[]) const
{
	std::ostringstream descstr;
	descstr << "-- <-c <config-file>>";
	bpo::options_description desc(descstr.str());
	desc.add_options()
		("config,c", bpo::value<std::string>(), "Configuration file.");
	bpo::variables_map vm;
	try
	{
		bpo::store(bpo::command_line_parser(argc, argv).options(desc).allow_unregistered().run(), vm);
		bpo::notify(vm);
	}
	catch (bpo::error const& e)
	{
		std::cerr << "Exception from command line processing in Config::getArtPset: " << e.what() << "\n";
		throw "cmdline parsing error.";
	}

	if (!vm.count("config"))
	{
		std::cerr << "Expected \"-- -c <config-file>\" fhicl file specification.\n";
		throw "cmdline parsing error.";
	}

	fhicl::ParameterSet pset;
	cet::filepath_lookup lookup_policy("FHICL_FILE_PATH");
	fhicl::make_ParameterSet(vm["config"].as<std::string>(), lookup_policy, pset);

	return pset;
}
void RoutingMasterTest::go()
{
	TLOG(TLVL_INFO) << "Entering MPI_Barrier";
	MPI_Barrier(MPI_COMM_WORLD);
	TLOG(TLVL_INFO) << "Done with Barrier";

	switch (role_)
	{
	case TestRole_t::TABLE_RECEIVER:
		table_receiver();
		break;
	case TestRole_t::ROUTING_MASTER:
		routing_master();
		break;
	case TestRole_t::TOKEN_GEN:
		generate_tokens();
		break;
	default:
		throw "No such node type";
	}

	TLOG(TLVL_INFO) << "Rank " << my_rank << " complete.";
}
void RoutingMasterTest::generate_tokens()
{
	TLOG(TLVL_INFO) << "generate_tokens(): Init";
	printHost("generate_tokens");

	int token_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (token_socket < 0)
	{
		TLOG(TLVL_ERROR) << "generate_tokens(): I failed to create the socket for sending Routing Tokens!";
	}
	struct sockaddr_in token_addr;
	auto sts = ResolveHost(routing_master_address_.c_str(), token_port_, token_addr);
	if (sts < 0)
	{
		TLOG(TLVL_ERROR) << "generate_tokens(): Could not resolve host name";
	}
	connect(token_socket, (struct sockaddr*)&token_addr, sizeof(token_addr));

	std::map<int, int> token_counter;
	for (auto rank : eb_ranks_)
	{
		token_counter[rank] = 0;
	}

	int sent_tokens = 0;
	while (sent_tokens < token_count_)
	{
		int this_rank = eb_ranks_[seedAndRandom() % eb_ranks_.size()];
		token_counter[this_rank]++;
		artdaq::detail::RoutingToken token;
		token.header = TOKEN_MAGIC;
		token.rank = this_rank;
		token.new_slots_free = 1;

		TLOG(TLVL_INFO) << "generate_tokens(): Sending RoutingToken " << ++sent_tokens << " for rank " << this_rank << " to " << routing_master_address_;
		send(token_socket, &token, sizeof(artdaq::detail::RoutingToken), 0);
		usleep(token_interval_us_);
	}

	// Top up each receiver so that every rank has been sent the same total number of free slots.
	int max_rank = 0;
	for (auto rank : token_counter)
	{
		if (rank.second > max_rank) max_rank = rank.second;
	}
	for (auto rank : token_counter)
	{
		artdaq::detail::RoutingToken token;
		token.header = TOKEN_MAGIC;
		token.rank = rank.first;
		token.new_slots_free = max_rank - rank.second;

		TLOG(TLVL_INFO) << "generate_tokens(): Sending RoutingToken " << ++sent_tokens << " for rank " << rank.first << " to " << routing_master_address_;
		send(token_socket, &token, sizeof(artdaq::detail::RoutingToken), 0);
		usleep(token_interval_us_);
	}

	TLOG(TLVL_INFO) << "generate_tokens(): Waiting at MPI_Barrier";
	MPI_Barrier(MPI_COMM_WORLD);
	TLOG(TLVL_INFO) << "generate_tokens(): Done with MPI_Barrier";
}
void RoutingMasterTest::table_receiver()
{
	TLOG(TLVL_INFO) << "table_receiver(): Init";
	printHost("table_receiver");

	auto table_socket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
	if (table_socket < 0)
	{
		TLOG(TLVL_ERROR) << "table_receiver(): Error creating socket for receiving data requests!";
	}

	struct sockaddr_in si_me_request;
	int yes = 1;
	if (setsockopt(table_socket, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0)
	{
		TLOG(TLVL_ERROR) << "table_receiver(): Unable to enable port reuse on request socket";
	}
	memset(&si_me_request, 0, sizeof(si_me_request));
	si_me_request.sin_family = AF_INET;
	si_me_request.sin_port = htons(table_port_);
	si_me_request.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(table_socket, (struct sockaddr*)&si_me_request, sizeof(si_me_request)) == -1)
	{
		TLOG(TLVL_ERROR) << "table_receiver(): Cannot bind request socket to port " << table_port_;
	}

	// Join the multicast group on which the Routing Master publishes table updates.
	struct ip_mreq mreq;
	long int sts = ResolveHost(multicast_address_.c_str(), mreq.imr_multiaddr);
	if (sts < 0)
	{
		TLOG(TLVL_ERROR) << "table_receiver(): Unable to resolve multicast hostname";
	}
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	if (setsockopt(table_socket, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
	{
		TLOG(TLVL_ERROR) << "table_receiver(): Unable to join multicast group";
	}

	struct epoll_event ev;
	int table_epoll_fd = epoll_create1(0);
	ev.events = EPOLLIN | EPOLLPRI;
	ev.data.fd = table_socket;
	if (epoll_ctl(table_epoll_fd, EPOLL_CTL_ADD, table_socket, &ev) == -1)
	{
		TLOG(TLVL_ERROR) << "table_receiver(): Could not register listen socket to epoll fd";
	}

	auto ack_socket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
	struct sockaddr_in ack_addr;
	sts = ResolveHost(routing_master_address_.c_str(), ack_port_, ack_addr);
	if (sts < 0)
	{
		TLOG(TLVL_ERROR) << "table_receiver(): Unable to resolve routing master hostname";
	}

	if (table_socket == -1 || table_epoll_fd == -1 || ack_socket == -1)
	{
		TLOG(TLVL_INFO) << "table_receiver(): One of the listen sockets was not opened successfully.";
	}

	artdaq::Fragment::sequence_id_t max_sequence_id = token_count_;
	artdaq::Fragment::sequence_id_t current_sequence_id = 0;
	std::map<artdaq::Fragment::sequence_id_t, int> routing_table;
	TLOG(TLVL_INFO) << "table_receiver(): Expecting " << max_sequence_id << " as the last Sequence ID in this run";
	while (current_sequence_id < max_sequence_id)
	{
		std::vector<epoll_event> table_events_(4);
		TLOG(TLVL_INFO) << "table_receiver(): Waiting for event on table socket";
		auto nfds = epoll_wait(table_epoll_fd, &table_events_[0], table_events_.size(), -1);
		if (nfds == -1)
		{
			perror("epoll_wait");
		}

		TLOG(TLVL_INFO) << "table_receiver(): Received " << nfds << " table update(s)";
		for (auto n = 0; n < nfds; ++n)
		{
			auto first = artdaq::Fragment::InvalidSequenceID;
			auto last = artdaq::Fragment::InvalidSequenceID;
			artdaq::detail::RoutingPacketHeader hdr;
			recv(table_events_[n].data.fd, &hdr, sizeof(artdaq::detail::RoutingPacketHeader), 0);

			TLOG(TLVL_INFO) << "table_receiver(): Checking for valid header";
			if (hdr.header == ROUTING_MAGIC)
			{
				artdaq::detail::RoutingPacket buffer(hdr.nEntries);
				TLOG(TLVL_INFO) << "table_receiver(): Receiving data buffer";
				sts = recv(table_events_[n].data.fd, &buffer[0], sizeof(artdaq::detail::RoutingPacketEntry) * hdr.nEntries, 0);
				assert(static_cast<size_t>(sts) == sizeof(artdaq::detail::RoutingPacketEntry) * hdr.nEntries);

				first = buffer[0].sequence_id;
				last = buffer[buffer.size() - 1].sequence_id;

				for (auto entry : buffer)
				{
					if (routing_table.count(entry.sequence_id))
					{
						assert(routing_table[entry.sequence_id] == entry.destination_rank);
					}
					routing_table[entry.sequence_id] = entry.destination_rank;
					TLOG(TLVL_INFO) << "table_receiver(): table_receiver " << my_rank << ": received update: SeqID " << entry.sequence_id << " -> Rank " << entry.destination_rank;
				}

				artdaq::detail::RoutingAckPacket ack;
				ack.first_sequence_id = first;
				ack.last_sequence_id = last;

				TLOG(TLVL_INFO) << "table_receiver(): Sending RoutingAckPacket with first= " << first << " and last= " << last << " to " << routing_master_address_ << ", port " << ack_port_;
				sendto(ack_socket, &ack, sizeof(artdaq::detail::RoutingAckPacket), 0, (struct sockaddr*)&ack_addr, sizeof(ack_addr));
				current_sequence_id = last;
			}
		}
	}

	TLOG(TLVL_INFO) << "table_receiver(): Waiting at MPI_Barrier";
	MPI_Barrier(MPI_COMM_WORLD);
	TLOG(TLVL_INFO) << "table_receiver(): Done with MPI_Barrier";
}
void RoutingMasterTest::routing_master()
{
	TLOG(TLVL_INFO) << "routing_master: Init";
	printHost("routing_master");

	app_name = "RoutingMaster";

	auto app = std::make_unique<artdaq::RoutingMasterApp>();

	app->initialize(pset_, 0, 0);
	app->do_start(art::RunID(1), 0, 0);
	TLOG(TLVL_INFO) << "routing_master: Waiting at MPI_Barrier";
	MPI_Barrier(MPI_COMM_WORLD);
	TLOG(TLVL_INFO) << "routing_master: Done with MPI_Barrier, calling RoutingMasterCore::stop";
	// ...
	TLOG(TLVL_INFO) << "routing_master: Done with RoutingMasterCore::stop, calling shutdown";
	// ...
	TLOG(TLVL_INFO) << "routing_master: Done with RoutingMasterCore::shutdown";
}
void RoutingMasterTest::printHost(const std::string& functionName) const
{
	char* doPrint = getenv("PRINT_HOST");
	if (doPrint == 0) { return; }
	const int ARRSIZE = 80;
	char hostname[ARRSIZE];
	std::string hostString;
	if (!gethostname(hostname, ARRSIZE))
	{
		hostString = hostname;
	}
	else
	{
		hostString = "unknown";
	}
	TLOG(TLVL_INFO) << "Running " << functionName
		<< " on host " << hostString
		<< " with rank " << my_rank << ".";
}
	struct rusage usage;
	getrusage(RUSAGE_SELF, &usage);
	std::cout << myid << ":"
		<< " user=" << artdaq::TimeUtils::convertUnixTimeToSeconds(usage.ru_utime)
		<< " sys=" << artdaq::TimeUtils::convertUnixTimeToSeconds(usage.ru_stime)
		<< std::endl;
int main(int argc, char* argv[])
{
	artdaq::configureMessageFacility("routing_master", false);

	std::cerr << "PID: " << getpid() << std::endl;
	volatile bool attach = true;
	// ... (debugger-attach loop and construction of the MPI wrapper object p)

	std::cerr << "Started process " << my_rank << " of " << p.procs_ << ".\n";

	try
	{
		// ... (the RoutingMasterTest instance is constructed and go() is run for this rank)
	}
	catch (std::string& x)
	{
		std::cerr << "Exception (type string) caught in routing_master: "
			<< x << "\n";
	}
	catch (char const* m)
	{
		std::cerr << "Exception (type char const*) caught in routing_master: ";
		if (m)
		{
			std::cerr << m;
		}
		else
		{
			std::cerr << "[the value was a null pointer, so no message is available]";
		}
		std::cerr << "\n";
	}
}
The RoutingMasterTest class runs the routing_master test.

A wrapper for a MPI program. Similar to MPISentry.

RoutingMasterTest(int argc, char *argv[])
	RoutingMasterTest Constructor.

fhicl::ParameterSet getPset(int argc, char *argv[]) const
	Parse the command line arguments and load a configuration FHiCL file.

void go()
	Start the test, using the role assigned.

void generate_tokens()
	Generate tokens and send them to the Routing Master.

void table_receiver()
	Receive Routing Tables from the Routing Master and send acknowledgement packets back.

void routing_master()
	Load a RoutingMasterCore instance, receive tokens from the token generators, and send table updates t...
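For reference, the constructor listed above shows every FHiCL parameter this test reads, together with its default value. The sketch below bundles those parameters into an inline FHiCL document, purely as an illustration: the receiver_ranks values are hypothetical, a real run also needs whatever RoutingMasterCore expects inside the daq table, and the test itself loads its configuration from a file via cet::filepath_lookup as shown in getPset(). The string overload of fhicl::make_ParameterSet is assumed here only to keep the example self-contained.

#include <string>

#include "fhiclcpp/ParameterSet.h"
#include "fhiclcpp/make_ParameterSet.h"

// Hypothetical helper, not part of routing_master_t.cc.
fhicl::ParameterSet exampleTestPset()
{
	std::string const document = R"(
		token_count: 1000          # total RoutingTokens each generator sends
		token_interval_us: 5000    # delay between tokens, in microseconds

		daq: {
			routing_master_hostname: "localhost"
			table_update_address: "227.128.12.28"   # multicast group for table updates
			routing_token_port: 35555
			table_update_port: 35556
			table_acknowledge_port: 35557

			policy: {
				receiver_ranks: [1, 2]   # hypothetical ranks acting as table receivers
			}
		}
	)";

	fhicl::ParameterSet pset;
	fhicl::make_ParameterSet(document, pset);
	return pset;
}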