diff -Nru pmacct-1.6.1/aclocal.m4 pmacct-1.7.0/aclocal.m4 --- pmacct-1.6.1/aclocal.m4 2016-10-31 18:59:32.000000000 +0000 +++ pmacct-1.7.0/aclocal.m4 2017-10-20 20:19:57.000000000 +0000 @@ -1187,6 +1187,7 @@ ]) # _AM_PROG_TAR m4_include([m4/ac_linearize_path.m4]) +m4_include([m4/ax_lib_mysql.m4]) m4_include([m4/libtool.m4]) m4_include([m4/ltoptions.m4]) m4_include([m4/ltsugar.m4]) diff -Nru pmacct-1.6.1/AUTHORS pmacct-1.7.0/AUTHORS --- pmacct-1.6.1/AUTHORS 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/AUTHORS 2017-10-20 16:56:19.000000000 +0000 @@ -1,5 +1,5 @@ -pmacct (Promiscuous mode IP Accounting package) v1.6.1 -pmacct is Copyright (C) 2003-2016 by Paolo Lucente +pmacct [IP traffic accounting : BGP : BMP : IGP : Streaming Telemetry] +pmacct is Copyright (C) 2003-2017 by Paolo Lucente Founder: @@ -24,9 +24,9 @@ Robert Blechinger Stefano Birmani Codethink.co.uk + Pier Carlo Chiodi Arnaud De-Bermingham Francois Deppierraz - Marcello Di Leonardo Pierre Francois Rich Gade Aaron Glenn @@ -46,7 +46,9 @@ Gabriel Snook Rene Stoutjesdijk Thomas Telkamp + Matthieu Texier Stig Thormodsrud Luca Tosolini Brent Van Dussen + Markus Weber Chris Wilson diff -Nru pmacct-1.6.1/bin/configure-help-replace.txt pmacct-1.7.0/bin/configure-help-replace.txt --- pmacct-1.6.1/bin/configure-help-replace.txt 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/bin/configure-help-replace.txt 2017-10-20 16:56:19.000000000 +0000 @@ -17,6 +17,8 @@ SQLITE3_LIBS linker flags for SQLITE3, overriding pkg-config RABBITMQ_CFLAGS C compiler flags for RABBITMQ, overriding pkg-config RABBITMQ_LIBS linker flags for RABBITMQ, overriding pkg-config + ZMQ_CFLAGS C compiler flags for ZMQ, overriding pkg-config + ZMQ_LIBS linker flags for ZMQ, overriding pkg-config KAFKA_CFLAGS C compiler flags for KAFKA, overriding pkg-config KAFKA_LIBS linker flags for KAFKA, overriding pkg-config GEOIP_CFLAGS C compiler flags for GEOIP, overriding pkg-config @@ -29,4 +31,6 @@ AVRO_LIBS linker flags for AVRO, overriding pkg-config NFLOG_CFLAGS C compiler flags for NFLOG, overriding pkg-config NFLOG_LIBS linker flags for NFLOG, overriding pkg-config + NDPI_CFLAGS C compiler flags for dynamic nDPI, overriding pkg-config + NDPI_LIBS linker flags for dynamic nDPI, overriding pkg-config diff -Nru pmacct-1.6.1/ChangeLog pmacct-1.7.0/ChangeLog --- pmacct-1.6.1/ChangeLog 2016-10-30 23:50:56.000000000 +0000 +++ pmacct-1.7.0/ChangeLog 2017-10-20 17:00:56.000000000 +0000 @@ -1,5 +1,320 @@ -pmacct (Promiscuous mode IP Accounting package) v1.6.1 -pmacct is Copyright (C) 2003-2016 by Paolo Lucente +pmacct [IP traffic accounting : BGP : BMP : IGP : Streaming Telemetry] +pmacct is Copyright (C) 2003-2017 by Paolo Lucente + +The keys used are: + !: fixed/modified feature, -: deleted feature, +: new feature + +1.7.0 -- 21-10-2017 + + ZeroMQ integration: by defining plugin_pipe_zmq to 'true', ZeroMQ is + used for queueing between the Core Process and plugins. This is in + alternative to the home-grown circular queue implementation (ie. + plugin_pipe_size). plugin_pipe_zmq_profile can be set to one value + of { micro, small, medium, large, xlarge } and allows to select + among a few standard buffering profiles without having to fiddle + with plugin_buffer_size. How to compile, install and operate ZeroMQ + is documented in the "Internal buffering and queueing" section of + the QUICKSTART document. + + nDPI integration: enables packet classification, replacing existing + L7-layer project integration, and is available for pmacctd and + uacctd. 
The feature, once nDPI is compiled in, is simply enabled by + specifying 'class' as part of the aggregation method. How to compile + install and operate nDPI is documented in the "Quickstart guide to + packet classification" section of the QUICKSTART document. + + nfacctd: introduced nfacctd_templates_file so that NetFlow v9/IPFIX + templates can be cached to disk to limit the amount of lost packets + due to unknown templates when nfacctd (re)starts. The implementation + is courtesy by Codethink Ltd. + + nfacctd: introduced support for PEN on IPFIX option templates. This + is in addition to already supported PEN for data templates. Thanks + to Gilad Zamoshinski ( @zamog ) for his support. + + sfacctd: introduced new aggregation primitives (tunnel_src_host, + tunnel_dst_host, tunnel_proto, tunnel_tos) to support inner L3 + layers. Thanks to Kaname Nishizuka ( @__kaname__ ) for his support. + + nfacctd, sfacctd: pcap_savefile and pcap_savefile_wait were ported + from pmacctd. They allow to process NetFlow/IPFIX and sFlow data + from previously captured packets; these also ease some debugging by + not having to resort anymore to tcpreplay for most cases. + + pmacctd, sfacctd: nfacctd_time_new feature has been ported so, when + historical accounting is enabled, to allow to choose among capture + time and time of receipt at the collector for time-binning. + + nfacctd: added support for NetFlow v9/IPFIX field types #130/#131, + respectively the IPv4/IPv6 address of the element exporter. + + nfacctd: introduced nfacctd_disable_opt_scope_check: mainly a work + around to implementations not encoding NetFlow v9/IPIFX option scope + correctly, this knob allows to disable option scope checking. Thanks + to Gilad Zamoshinski ( @zamog ) for his support. + + pre_tag_map: added 'source_id' key for tagging on NetFlow v9/IPFIX + source_id field. Added also 'fwdstatus' for tagging on NetFlow v9/ + IPFIX information element #89: this implementation is courtesy by + Emil Palm ( @mrevilme ). + + tee plugin: tagging is now possible on NetFlow v5-v8 engine_type/ + engine_id, NetFlow v9/IPFIX source_id and sFlow AgentId. + + tee plugin: added support for 'src_port' in tee_receivers map. When + in non-transparent replication mode, use the specified UDP port to + send data to receiver(s). This is in addition to tee_source_ip, + which allows to set a configured IP address as source. + + networks_no_mask_if_zero: a new knob so that IP prefixes with zero + mask - that is, unknown ones or those hitting a default route - are + not masked. The feature applies to *_net aggregation primitives and + makes sure individual IP addresses belonging to unknown IP prefixes + are not zeroed out. + + networks_file: hooked up networks_file_no_lpm feature to peer and + origin ASNs and (BGP) next-hop fields. + + pmacctd: added support for calling pcap_set_protocol() if supported + by libpcap. Patch is courtesy by Lennert Buytenhek ( @buytenh ). + + pmbgpd, pmbmpd, pmtelemetryd: added a few CL options to ease output + of BGP, BMP and Streaming Telemetry data, for example: -o supplies + a b[gm]p_daemon_msglog_file, -O supplies a b[gm]p_dump_file and -i + supplies b[gm]p_dump_refresh_time. + + kafka plugin: in the examples section, added a Kafka consumer script + using the performing confluent-kafka-python module. + ! fix, BGP daemon: segfault with add-path enabled peers as per issue + #128. Patch is courtesy by Markus Weber ( @FvDxxx ). + ! fix, print plugin: do not update link to latest file if cause of + purging is a safe action (ie. 
cache space is finished. Thanks to + Camilo Cardona ( @jccardonar ) for reporting the issue. Also, for + the same reason, do not execute triggers (ie. print_trigger_exec). + ! fix, nfacctd: improved IP protocol check in NF_evaluate_flow_type() + A missing length check was causing, under certain conditions, some + flows to be marked as IPv6. Many thanks to Yann Belin for his + support resolving the issue. + ! fix, print and SQL plugins: optimized the cases when the dynamic + filename/table has to be re-evaluated. This results in purge speed + gains when the dynamic part is time-related and nfacctd_time_new is + set to true. + ! fix, bgp_daemon_md5_file: if the server socket is AF_INET and the + compared peer address in MD5 file is AF_INET6 (v4-mapped v6), pass + it through ipv4_mapped_to_ipv4(). Also if the server socket is + AF_INET6 and the compared peer addess in MD5 file is AF_INET, pass + it through ipv4_to_ipv4_mapped(). Thanks to Paul Mabey for reporting + the issue. + ! fix, nfacctd: improved length checks in resolve_vlen_template() to + prevent SEGVs. Thanks to Josh Suhr and Levi Mason for their support. + ! fix, nfacctd: flow stitching, improved flow end time checks. Thanks + to Fabio Bindi ( @FabioLiv ) for his support resolving the issue. + ! fix, amqp_common.c: amqp_persistent_msg now declares the RabbitMQ + exchange as durable in addition to marking messages as persistent; + this is related to issue #148. + ! fix, nfacctd: added flowset count check to existing length checks + for NetFlow v9/IPFIX datagrams. This is to avoid logs flooding in + case of padding. Thanks to Steffen Plotner for reporting the issue. + ! fix, BGP daemon: when dumping BGP data at regular time intervals, + dump_close message contained wrongly formatted timestamp. Thanks to + Yuri Lachin for reporting the issue. + ! fix, MySQL plugin: if --enable-ipv6 and sql_num_hosts set to true, + use INET6_ATON for both v4 and v6 addresses. Thanks to Guy Lowe + ( @gunkaaa ) for reporting the issue and his support resolving it. + ! fix, 'flows' primitive: it has been wired to sFlow so to count Flow + Samples received. This is to support Q21 in FAQS document. + ! fix, BGP daemon: Extended Communities value was printed with %d + (signed) format string instead of %u (unsigned), causing issue on + large values. + ! fix, aggregate_primitives: improved support of 'u_int' semantics for + 8 bytes integers. This is in addition to already supported 1, 2 and + 4 bytes integers. + ! fix, pidfile: pidfile created by plugin processes was not removed. + Thanks to Yuri Lachin for reporting the issue. + ! fix, print plugin: checking non-null file descriptor before setvbuf + in order to prevent SEGV. Similar checks were added to prevent nulls + be input to libavro calls when Apache Avro output is selected. + ! fix, SQL plugins: MPLS aggregation primitives were not correctly + activated in case sql_optimize_clauses was set to false. + ! fix, building system: reviewed minimum requirement for libraries, + removed unused m4 macros, split features in plugins (ie. MySQL) and + supports (ie. JSON). + ! fix, sql_history: it now correctly honors periods expressed is 's' + seconds. + ! fix, BGP daemon: rewritten bgp_peer_print() to be thread safe. + ! fix, pretag.h: addressed compiler warning on 32-bit architectures, + integer constant is too large for "long" type. Thanks to Stephen + Clark ( @sclark46 ) for reporting the issue. 
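  To give an idea of how the two 1.7.0 headline features above (ZeroMQ
  queueing and nDPI classification) can be combined, what follows is a
  minimal, illustrative pmacctd configuration sketch; interface name,
  plugin name and output path are placeholders, and it assumes both
  ZeroMQ and nDPI support were compiled in as per the QUICKSTART
  document:

    ! illustrative only: nDPI classification with ZeroMQ queueing
    ! between the Core Process and a print plugin
    interface: eth0
    plugins: print[ndpi_demo]
    aggregate[ndpi_demo]: src_host, dst_host, proto, class
    plugin_pipe_zmq[ndpi_demo]: true
    plugin_pipe_zmq_profile[ndpi_demo]: micro
    print_refresh_time[ndpi_demo]: 300
    print_output[ndpi_demo]: csv
    print_output_file[ndpi_demo]: /path/to/ndpi_demo.csv

  The 'micro' profile is sized for up to roughly 1K flows/samples/packets
  per second; see plugin_pipe_zmq_profile in CONFIG-KEYS for the other
  buffering buckets.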
+ - MongoDB plugin: it is being discontinued since the old Mongo API is + not supported anymore and there has never been enough push from the + community to transition to the new/current API (which would require + a rewrite of most of the plugin). In this phase-1 the existing + MongoDB plugin is still available using 'plugins: mongodb_legacy' + in the configuration. + - Packet classification basing on the L7-filter project is being + discontinued (ie. 'classifiers' directive). This is being replaced + by an implementation basing on the nDPI project. As part of this + also the sql_aggressive_classification knob has been discontinued. + - tee_receiver was part of the original implementation of the tee + plugin, allowing to forward to a single target and hence requiring + multiple plugins instantiated, one per target. Since 0.14.3 this + directive was effectively outdated by tee_receivers. + - tmp_net_own_field: the knob has been discontinued and was allowing + to revert to backward compatible behaviour of IP prefixes (ie. + src_net) being written in the same field as IP addresses (ie. + src_host). + - tmp_comms_same_field: the knob has been discontinued and was + allowing to revert to backward compatible behaviour of BGP + communities (standard and extended) being writeen all in the same + field. + - plugin_pipe_amqp and plugin_pipe_kafka features were meant as an + alternative to the homegrown queue solution for internal messaging, + ie. passing data from the Core Process to Plugins, and are being + discontinued. They are being replaced by a new implementation, + plugin_pipe_zmq, basing on ZeroMQ. + - plugin_pipe_backlog was allowing to keep an artificial backlog of + data in the Core Process so to maximise bypass poll() syscalls in + plugins. If home-grown queueing is found limiting, instead of + falling back to such strategies, ZeroMQ queueing should be used. + - pmacctd: deprecated support for legacy link layers: FDDI, Token Ring + and HDLC. + +1.6.2 -- 21-04-2017 + + BGP, BMP daemons: introduced support for BGP Large Communities IETF + draft (draft-ietf-idr-large-community). Large Communities are stored + in a variable-length field. Thanks to Job Snijders ( @job ) for his + support. + + BGP daemon: implemented draft-ietf-idr-shutdown. The draft defines a + mechanism to transmit a short freeform UTF-8 message as part of a + Cease NOTIFICATION message to inform the peer why the BGP session is + being shutdown or reset. Thanks to Job Snijders ( @job ) for his + support. + + tee plugin, pre_tag_map: introduced support for inspetion of specific + flow primitives and selective replication over them. The primitives + supported are: input and output interfaces, source and destination + MAC addresses, VLAN ID. The feature is now limited to sFlow v5 only. + Thanks to Nick Hilliard and Barry O'Donovan for their support. + + Added src_host_pocode and dst_host_pocode primitives, pocode being a + compact and (de-)aggregatable (easy to identify districts, cities, + metro areas, etc.) geographical representation, based on the Maxmind + v2 City Database. Thanks to Jerred Horsman for his support. + + Kafka support: introduced support for user-defined (librdkafka) config + file via the new *_kafka_config_file config directives. Full pathname + to a file containing directives to configure librdkafka is expected. + All knobs whose values are string, integer, boolean are supported. 
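  As an illustration of the *_kafka_config_file mechanism just described,
  a Kafka plugin could be pointed at a librdkafka configuration file
  roughly as follows (pathname and tuning knobs are examples only; see
  librdkafka's CONFIGURATION.md for the authoritative list of directives):

    kafka_config_file: /path/to/librdkafka.conf

  where /path/to/librdkafka.conf would contain one CSV line per directive,
  in the form 'type, key, value', for example:

    global, socket.keepalive.enable, true
    topic, compression.codec, snappy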
+ + AMQP, Kafka plugins: introduced new directives kafka_avro_schema_topic, + amqp_avro_schema_routing_key to transmit Apache Avro schemas at regular + time intervals. The routing key/topic can overlap with the one used to + send actual data. + + AMQP, Kafka plugins: introduced support for start/stop markers when + encoding is set to Avro (ie. 'kafka_output: avro'); also Avro schema + is now embedded in a JSON envelope when sending it via a topic/routing + key (ie. kafka_avro_schema_topic). + + print plugin: introduced new config directive avro_schema_output_file + to save the Apache Avro schema in a separate file (it was only possible + to have it combined at the beginning of the data file). + + BGP daemon: introduced a new bgp_daemon_as config directive to set a + LocalAS which could be different from the remote peer one. This is to + establish an eBGP session instead of a iBGP one (default). + + flow_to_rd_map: introduced support for mpls_vpn_id. In NetFlow/IPFIX + this is compared against Field Types #234 and #235. + + sfacctd: introduced support for sFlow v2/v4 counter samples (generic, + ethernet, vlan). This is in addition to existing support for sFlow v5 + counters. + + BGP, BMP and Streming Telemetry daemons: added writer_id field when + writing to Kafka and/or RabbitMQ. The field reports the configured + core_proc_name and the actual PID of the writer process (so, while + being able to correlate writes to the same daemon, it's also possible + to distinguish among overlapping writes). + + amqp, kafka, print plugins: harmonized JSON output to the above: added + event_type field, writer_id field with plugin name and PID. + + BGP, BMP daemons: added AFI, SAFI information to log and dump outputs; + also show VPN Label if SAFI is MPLS VPN. + + pmbgpd, pmbmpd: added logics to bypass building RIBs if only logging + BGP/BMP data real-time. + + BMP daemon: added BMP peer TCP port to log and dump outputs (for NAT + traversal scenarios). Contextually, multiple TCP sessions per IP are + now supported for the same reason. + + SQL plugins: ported (from print, etc. plugins) the 1.6.1 re-working of + the max_writers feature. + + uacctd: use current time when we don't have a timestamp from netlink. + We only get a timestamp when there is a timestamp in the skb. Notably, + locally generated packets don't get a timestamp. The patch is courtesy + by Vincent Bernat ( @vincentbernat ). + + build system: added configure options for partial linking of binaries + with any selection/combination of IPv4/IPv6 accounting daemons, BGP + daemon, BMP daemon and Streaming Telemetry daemon possible. By default + all are compiled in. + + BMP daemon: internal code changes to pass additional info from BMP + per-peer header to bgp_parse_update_msg(). Goal is to expose further + info, ie. pre- vs post- policy, when logging or dumping BMP info. + ! fix, BGP daemon: introduced parsing of IPv6 MPLS VPN (vpnv6) NLRIs. + Thanks to Alberto Santos ( @m4ccbr ) for reporting the issue. + ! fix, BGP daemon: upon doing routes lookup, now correctly honouring + the case of BGP-LU (SAFI_MPLS_LABEL). + ! fix, BGP daemon: send BGP NOTIFICATION out in case of known failures + in bgp_parse_msg(). + ! fix, kafka_partition, *_kafka_partition: default value changed from 0 + (partition zero) to -1 (RD_KAFKA_PARTITION_UA, partition unassigned). + Thanks to Johan van den Dorpe ( @johanek ) for his support. + ! fix, pre_tag_map: removed constraint for 'ip' keyword for nfacctd and + sfacctd maps. 
While this is equivalent syntax to specifying rules with + 'ip=0.0.0.0/0', it allows for map indexing (maps_index: true). + ! fix, bgp_agent_map: improved sanity check against bgp_ip for IPv6 + addresses (ie. an issue appeared for the case of '::1' where the first + 64 bits are zeroed out). Thanks to Charlie Smurthwaite ( @catphish ) + for reporting the issue. + ! fix, maps_index: indexing now correctly works for IPv6 pre_tag_map + entries. That is, those where 'ip', the IP address of the NetFlow/ + IPFIX/sFlow exporter, is an IPv6 address. + ! fix, pre_tag_map: if mpls_vpn_rd matching condition is specified and + maps_index is enabled, PT_map_index_fdata_mpls_vpn_rd_handler() now + picks the right (and expected) info. + ! fix, pkt_handlers.c: improved definition and condition to free() in + bgp_ext_handler() in order to prevent SEGVs. Thanks to Paul Mabey for + his support. + ! fix, kafka_common.c: removed waiting time from p_kafka_set_topic(). + Added docs advicing to create in advance Kafka topics. + ! fix, sfacctd, sfprobe: tag and tag2 are now correctly re-defined as + 64 bits long. + ! fix, sfprobe plugin, sfacctd: tags and class primitives are now being + encoded/decoded using enterprise #43874, legit, instead of #8800, that + was squatted back in the times. See issue #71 on GiHub for more info. + ! fix, sfacctd: lengthCheck() + skipBytes() were producing an incorrect + jump in case of unknown flow samples. Replaced by skipBytesAndCheck(). + Thanks to Elisa Jasinska ( @fooelisa ) for her support. + ! fix, pretag_handlers.c: in bgp_agent_map added case for 'vlan and ...' + filter values. + ! fix, BGP daemon: multiple issues of partial visibility of the stored + RIBs and SEGVs when bgp_table_per_peer_buckets was not left default: + don't mess with bms->table_per_peer_buckets given the multi-threaded + scenario. Thanks to Dan Berger ( @dfberger ) for his support. + ! fix, BGP, BMP daemons: bgp_process_withdraw() function init aligned to + bgp_process_update() in order to prevent SEGVs. Thanks to Yuri Lachin + for his support. + ! fix, bgp_msg.c: Route Distinguisher was stored and printed incorrectly + when of type RD_TYPE_IP. Thanks to Alberto Santos ( @m4ccbr ) for + reporting the issue. + ! fix, bgp_logdump.c: p_kafka_set_topic() was being wrongly applied to + an amqp_host structure (instead of a kafka_host structure). Thanks to + Corentin Neau ( @weyfonk ) for reporting the issue. + ! fix, BGP daemon: improved BGP next-hop setting and comparison in cases + of MP_REACH_NLRI and MPLS VPNs. Many thanks to both Catalin Petrescu + ( @cpmarvin ) and Alberto Santos ( @m4ccbr ) for their support. + ! fix, pmbgpd, pmbmpd: pidfile was not written even if configured. Thanks + to Aaron Glenn ( @aaglenn ) for reporting the issue. + ! fix, tee plugin: tee_max_receiver_pools is now correctly honoured and + debug message shows the replicatd protocol, ie. NetFlow/IPFIX vs sFlow. + ! AMQP, Kafka plugins: separate JSON objects, newline separated, are + preferred to JSON arrays when buffering of output is enabled (ie. + kafka_multi_values) and output is set to JSON. This is due to quicker + serialisation performance shown by the Jansson library. + ! build system: switched to enable IPv6 support by default (while the + --disable-ipv6 knob can be used to reverse the behaviour). Patch is + courtesy by Elisa Jasinska ( @fooelisa ). + ! build system: given visibility, ie. via -V CL option, into compile + options enabled by default (ie. IPv6, threads, 64bit counters, etc.). + ! 
fix, nfprobe: free expired records when exporting to an unavailable + collector in order to prevent a memory leak. Patch is courtersy by + Vladimir Kunschikov ( @kunschikov ). + ! fix, AMQP plugin: set content type to binary in case of Apache Avro + output. + ! fix, AMQP, Kafka plugins: optimized amqp_avro_schema_routing_key and + kafka_avro_schema_topic. Avro schema is built only once at startup. + ! fix, cfg.c: improved parsing of config key-values where squared brakets + appear in the value part. Thanks to Brad Hein ( @regulatre ) for + reporting the issue. Also, detection of duplicates among plugin and + core process names was improved. + ! fix, misc: compiler warnings: fix up missing includes and prototypes; + the patch is courtesy by Tim LaBerge ( @tlaberge ). + ! kafka_consumer.py, amqp_receiver.py: Kafka, RabbitMQ consumer example + scripts have been greatly expanded to support posting to a REST API or + to a new Kafka topic, including some stats. Also conversion of multiple + newline-separated JSON objects to a JSON array has been added. Misc + bugs were fixed. 1.6.1 -- 31-10-2016 + Introduced pmbgpd daemon: a stand-alone BGP collector daemon; acts as a diff -Nru pmacct-1.6.1/CONFIG-KEYS pmacct-1.7.0/CONFIG-KEYS --- pmacct-1.6.1/CONFIG-KEYS 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/CONFIG-KEYS 2017-10-20 16:56:19.000000000 +0000 @@ -46,14 +46,15 @@ VALUES: [ src_mac, dst_mac, vlan, cos, etype, src_host, dst_host, src_net, dst_net, src_mask, dst_mask, src_as, dst_as, src_port, dst_port, tos, proto, none, sum_mac, sum_host, sum_net, sum_as, sum_port, flows, tag, tag2, label, - class, tcpflags, in_iface, out_iface, std_comm, ext_comm, as_path, - peer_src_ip, peer_dst_ip, peer_src_as, peer_dst_as, local_pref, med, - src_std_comm, src_ext_comm, src_as_path, src_local_pref, src_med, mpls_vpn_rd, - mpls_label_top, mpls_label_bottom, mpls_stack_depth, sampling_rate, - src_host_country, dst_host_country, pkt_len_distrib, nat_event, fw_event, - post_nat_src_host, post_nat_dst_host, post_nat_src_port, post_nat_dst_port, - timestamp_start, timestamp_end, timestamp_arrival, export_proto_seqno, - export_proto_version ] + class, tcpflags, in_iface, out_iface, std_comm, ext_comm, lrg_comm, + as_path, peer_src_ip, peer_dst_ip, peer_src_as, peer_dst_as, local_pref, + med, src_std_comm, src_ext_comm, src_lrg_comm, src_as_path, src_local_pref, + src_med, mpls_vpn_rd, mpls_label_top, mpls_label_bottom, mpls_stack_depth, + sampling_rate, src_host_country, dst_host_country, src_host_pocode, + dst_host_pocode, pkt_len_distrib, nat_event, fw_event, post_nat_src_host, + post_nat_dst_host, post_nat_src_port, post_nat_dst_port, tunnel_src_host, + tunnel_dst_host, tunnel_proto, tunnel_tos, timestamp_start, timestamp_end, + timestamp_arrival, export_proto_seqno, export_proto_version ] FOREWORDS: Individual IP packets are uniquely identified by their header field values (a rather large set of primitives!). Same applies to uni-directional IP flows, as they have at least enough information to discriminate where packets are coming @@ -72,8 +73,7 @@ allows to make an unique aggregate which accounts for the grand total of traffic flowing through a specific interface. 'tag', 'tag2' and 'label' enable generation of tags when tagging engines (pre_tag_map, post_tag) are in use. - 'class' enables L7 traffic classes when Packet/Flow Classification engine - (classifiers) is in use. + 'class' enables L7 traffic classification. NOTES: * Some primitives (ie. 
tag2, timestamp_start, timestamp_end) are not part of any default SQL table schema shipped. Always check out documentation related to the RDBMS in use (ie. 'sql/README.mysql') which will point you to extra @@ -84,15 +84,16 @@ enabled this field will report a value of one (1); otherwise it will report the rate that is passed by the protocol or sampling_map. A value of zero (0) means 'unknown' and hence no rate is applied to original counter values. - * src_std_comm, src_ext_comm, src_as_path are based on reverse BGP lookups; - peer_src_as, src_local_pref and src_med are by default based on reverse BGP - lookups but can be alternatively based on other methods, for example maps - (ie. bgp_peer_src_as_type). Internet traffic is by nature asymmetric hence - reverse BGP lookups must be used with caution (ie. against own prefixes). - * Communities (ie. std_comm, ext_comm) and AS-PATHs (ie. as_path) are fixed - size (96 and 128 chars respectively at time of writing). Directives like - bgp_stdcomm_pattern and bgp_aspath_radius are aimed to keep length of these - strings under control but sometimes this is not enough. While the longer + * src_std_comm, src_ext_comm, src_lrg_comm, src_as_path are based on reverse + BGP lookups; peer_src_as, src_local_pref and src_med are by default based on + reverse BGP lookups but can be alternatively based on other methods, for + example maps (ie. bgp_peer_src_as_type). Internet traffic is by nature + asymmetric hence reverse BGP lookups must be used with caution (ie. against + own prefixes). + * Communities (ie. std_comm, ext_comm, lrg_comm) and AS-PATHs (ie. as_path) + are fixed size (96 and 128 chars respectively at time of writing). Directives + like bgp_stdcomm_pattern and bgp_aspath_radius are aimed to keep length of + these strings under control but sometimes this is not enough. While the longer term approach will be to define these primitives as varchar, the short-term approach is to re-define default size, ie. MAX_BGP_STD_COMMS MAX_BGP_ASPATH in network.h, to the desired size (blowing extra memory). This will require @@ -153,36 +154,39 @@ pmacctd. DEFAULT: none +KEY: pcap_protocol [GLOBAL, PMACCTD_ONLY] +DESC: If set, specifies a specific packet socket protocol value to limit packet capture + to (for example, 0x0800 = IPv4). This option is only supported if pmacct was built + against a version of libpcap that supports pcap_set_protocol(), and it only applies + to pmacctd. +DEFAULT: none + KEY: snaplen (-L) [GLOBAL, NO_NFACCTD, NO_SFACCTD] DESC: Specifies the maximum number of bytes to capture for each packet. This directive has - key importance when enabling both classification and connection tracking engines. In - fact, some protocols (mostly text-based eg.: RTSP, SIP, etc.) benefit of extra bytes - because they give more chances to successfully track data streams spawned by control - channel. But it must be also noted that capturing larger packet portion require more - resources. The right value need to be traded-off. In case classification is enabled, - values under 200 bytes are often meaningless. 500-750 bytes are enough even for text - based protocols. Default snaplen values are ok if classification is disabled. -DEFAULT: 68 bytes; 128 bytes if compiled with --enable-ipv6 + key importance to both classification and connection tracking engines. In fact, some + protocols (mostly text-based eg.: RTSP, SIP, etc.) benefit of extra bytes because + they give more chances to successfully track data streams spawned by control channel. 
+ But it must be also noted that capturing larger packet portion require more resources. + The right value need to be traded-off. In case classification is enabled, values under + 200 bytes are often meaningless. 500-750 bytes are enough even for text based + protocols. Default snaplen values are ok if classification is disabled. +DEFAULT: 128 bytes; 64 bytes if compiled with --disable-ipv6 KEY: plugins (-P) -VALUES: [ memory | print | mysql | pgsql | sqlite3 | mongodb | nfprobe | sfprobe | tee | - amqp | kafka ] +VALUES: [ memory | print | mysql | pgsql | sqlite3 | nfprobe | sfprobe | tee | amqp | kafka ] DESC: Plugins to be enabled. memory, print, nfprobe, sfprobe and tee plugins are always included in pmacct executables as they do not contain dependencies on external - libraries. Database (ie. SQL, MongoDB) and messaging ones (ie. amqp, kafka) do - have external dependencies and hence are available only if explicitely configured - and compiled. + libraries. Database (ie. RDBMS, noSQL) and messaging ones (ie. amqp, kafka) do have + external dependencies and hence are available only if explicitely configured and + compiled. memory plugin uses a memory table as backend; then, a client tool, 'pmacct', can fetch the memory table content; the memory plugin is good for prototype solutions and/or small environments. mysql, pgsql and sqlite3 plugins output respectively to MySQL, PostgreSQL and SQLite 3.x (or BerkeleyDB 5.x with the SQLite API compiled-in) tables - to store data. mongodb enables use of the noSQL document-oriented database MongoDB - (requires installation of MongoDB API C driver which is shipped separatedly from the - main MongoDB package; read more in QUICKSTART in the "noSQL (MongoDB) setup examples" - section). print plugin prints output data to flat-files or stdout in JSON, CSV or - tab-spaced formats, or encodes it using the Apache Avro serialization system. amqp - and kafka plugins allow to output data to RabbitMQ and Kafka brokers respectively. All - these plugins, SQL, no-SQL and messaging are good for production solutions and/or + to store data. print plugin prints output data to flat-files or stdout in JSON, CSV + or tab-spaced formats, or encodes it using the Apache Avro serialization system. amqp + and kafka plugins allow to output data to RabbitMQ and Kafka brokers respectively. + All these plugins, SQL, no-SQL and messaging are good for production solutions and/or larger scenarios. nfprobe acts as a NetFlow/IPFIX agent and exports collected data via NetFlow v1/v5/ v9 and IPFIX datagrams to a remote collector. sfprobe acts as a sFlow agent and @@ -253,39 +257,9 @@ In case of data loss messages containing the "missing data detected" string will be logged - indicating the plugin affected and current settings. -DEFAULT: 4MB - -KEY: plugin_pipe_amqp -VALUES: [ true | false ] -DESC: By defining this directive to 'true', a RabbitMQ broker is used for queueing and - data exchange between the Core Process and the plugins. This is in alternative to - the home-grown circular queue implementation (see plugin_pipe_size description). - This directive, along with all other plugin_pipe_amqp_* directives, can be set - globally or apply on a per plugin basis (ie. it is a valid scenario, if multiple - plugins are instantiated, that some make use of home-grown queueing, while others - use RabbitMQ based queueing). 
For a quick comparison: while relying on a RabbitMQ - broker for queueing introduces an external dependency (rabbitmq-c library, RabbitMQ - server, etc.), it reduces the amount of setting needed by the home-grown circular - queue implementation. See QUICKSTART for some examples. -DEFAULT: false -KEY: plugin_pipe_kafka -VALUES: [ true | false ] -DESC: By defining this directive to 'true', a Kafka broker is used for queueing and data - exchange between the Core Process and the plugins. This is in alternative to the - home-grown circular queue implementation (see plugin_pipe_size description). This - directive, along with all other plugin_pipe_kafka_* directives, can be set globally - or apply on a per plugin basis (ie. it is a valid scenario, if multiple plugins are - instantiated, that some make use of home-grown queueing, while others use Kafka - based queueing). For a quick comparison: using a Kafka broker gives the same - benefits as using a RabbitMQ broker (read plugin_pipe_amqp); plus, Kafka scales - more than RabbitMQ. See QUICKSTART for some examples. -NOTES: librdkafka does not currently expose the file descriptor(s) used internally so to - allow an external poll() or select() against it/them. This is being worked on as - part of librdkafka issue #429: https://github.com/edenhill/librdkafka/issues/429 . - As a result of that, the plugin_pipe_kafka feature is limited to 'print' plugin as - a proof of concept. -DEFAULT: false + Alternatively see at plugin_pipe_zmq and plugin_pipe_zmq_profile. +DEFAULT: 4MB KEY: plugin_buffer_size DESC: By defining the transfer buffer size, in bytes, this directive enables buffering of @@ -296,31 +270,58 @@ with the home-grown circular queue implemetation, the value has to be minor/equal to the size defined by 'plugin_pipe_size' and keeping a ratio between 1:100 and 1:1000 among the two is considered good practice; the circular queue of plugin_pipe_size size - is partitioned in chunks of plugin_buffer_size; if used with the RabbitMQ broker based - queueing (ie. 'plugin_pipe_amqp: true') this directive sets the frame_max allowed by - the underlying RabbitMQ session. -DEFAULT: Set to the size of the smallest element to buffer + is partitioned in chunks of plugin_buffer_size. -KEY: plugin_pipe_backlog -VALUES: [0 <= value < 100] -DESC: Expects the value to be a percentage. It creates a backlog of buffers on the pipe - before actually releasing them to the plugin. The strategy helps optimizing inter - process communications where plugins are quicker handling data than the Core process. - By default backlog is disabled; as with buffering in general, this feature should be - enabled with caution in lab and low-traffic environments. -DEFAULT: 0 + Alternatively see at plugin_pipe_zmq and plugin_pipe_zmq_profile. +DEFAULT: Set to the size of the smallest element to buffer KEY: plugin_pipe_check_core_pid VALUES: [ true | false ] DESC: When enabled (default), validates the sender of data at the plugin side. The check consists in verifying that the sender PID matches the PID of the plugin parent - process. This is useful when plugin_pipe_amqp or plugin_pipe_kafka are enabled and - hence a broker sits between the daemon Core Process and the Plugins. The feature is - not inteded to be a security one; instead its objective is to limit impact of such - things like mis-configurations, daemons started twice with the same configuration, - etc. + process. 
The feature is not inteded to be a security one; instead its objective is + to limit impact of such things like mis- configurations, daemons started twice with + the same configuration, etc. DEFAULT: true +KEY: plugin_pipe_zmq +VALUES: [ true | false ] +DESC: By defining this directive to 'true', a ZeroMQ queue is used for queueing and data + exchange between the Core Process and the plugins. This is in alternative to the + home-grown circular queue implementation (see plugin_pipe_size description). This + directive, along with all other plugin_pipe_zmq_* directives, can be set globally + or be applied on a per plugin basis (ie. it is a valid scenario, if multiple + plugins are instantiated, that some make use of home-grown queueing, while others + use ZeroMQ based queueing). For a quick comparison: while relying on a ZeroMQ queue + introduces an external dependency, ie. libzmq, it reduces the bare minimum the need + of settings of the home-grown circular queue implementation. See QUICKSTART for + some examples. +DEFAULT: false + +KEY: plugin_pipe_zmq_retry +DESC: Defines the interval of time, in seconds, after which a connection to the ZeroMQ + server (Core Process) should be retried by the client (Plugin) after a failure is + detected. +DEFAULT: 60 + +KEY: plugin_pipe_zmq_profile +VALUES: [ micro | small | medium | large | xlarge ] +DESC: Allows to select some standard buffering profiles. Following are the recommended + buckets in flows/samples/packets per second: + + micro : up to 1K + small : from 1K to 10-15K + medium : from 10-10K to 100-125K + large : from 100-125K to 250K + xlarge : from 250K + + A symptom the selected profile is undersized is missing data warnings appear in + the logs; a symptom it is oversized instead is latency in data being purged out. + The amount of flows/samples per second can be estimated as described in Q21 in + the FAQS document. Should no profile fit the sizing, the buffering value can be + customised using the plugin_buffer_size directive. +DEFAULT: micro + KEY: files_umask DESC: Defines the mask for newly created files (log, pid, etc.) and their related directory structure. A mask less than "002" is not accepted due to security reasons. @@ -347,15 +348,6 @@ exclusive with 'pcap_savefile' (-I). DEFAULT: Interface is selected by by the Operating System -KEY: pcap_savefile (-I) [GLOBAL, PMACCTD_ONLY] -DESC: File in libpcap savefile format from which read data (this is in alternative to binding - to an intervace). The file has to be correctly finalized in order to be read. As soon - as 'pmacctd' is finished with the file, it exits (unless the 'savefile_wait' option is - in place). The directive doesn't apply to [ns]facctd; to replay original NetFlow/sFlow - streams, a tool like TCPreplay can be used instead. The directive is mutually exclusive - with 'interface' (-i). -DEFAULT: none - KEY: interface_wait (-w) [GLOBAL, PMACCTD_ONLY] VALUES: [ true | false ] DESC: If set to true, this option causes 'pmacctd' to wait for the listening device to become @@ -364,12 +356,21 @@ detected. DEFAULT: false -KEY: savefile_wait (-W) [GLOBAL, PMACCTD_ONLY] +KEY: pcap_savefile (-I) [GLOBAL, NO_UACCTD] +DESC: File in libpcap savefile format to read data from (as an alternative to live data + collection. The file has to be correctly finalized in order to be read. As soon as + the daemon finished processing the file, it exits (unless the 'pcap_savefile_wait' + config directive is specified). 
The directive is mutually exclusive with 'interface' + (-i) for pmacctd and with [ns]facctd_ip (-L) and [ns]facctd_port (-l) for nfacctd + and sfacctd respectively. +DEFAULT: none + +KEY: pcap_savefile_wait (-W) [GLOBAL, NO_UACCTD] VALUES: [ true | false ] -DESC: If set to true, this option will cause 'pmacctd' to wait indefinitely for a signal (ie. - CTRL-C when not daemonized or 'killall -9 pmacctd' if it is) after being finished with - the supplied libpcap savefile (pcap_savefile). It's particularly useful when inserting - fixed amounts of data into memory tables by keeping the daemon alive. +DESC: If set to true, this option will cause the daemon to wait indefinitely for a signal + (ie. CTRL-C when not daemonized or 'killall -9 pmacctd' if it is) after being finished + processing the supplied libpcap savefile (pcap_savefile). This is particularly useful + when inserting fixed amounts of data into memory tables. DEFAULT: false KEY: promisc (-N) [GLOBAL, PMACCTD_ONLY] @@ -412,13 +413,14 @@ DEFAULT: none (logging to stderr) KEY: logfile -DESC: Enables logging to a file (bypassing syslog); expected value is a pathname +DESC: Enables logging to a file (bypassing syslog); expected value is a pathname. The target + file can be re-opened by sending a SIGHUP to the daemon so that, for example, logs can + be rotated. DEFAULT: none (logging to stderr) -KEY: [ amqp_host | plugin_pipe_amqp_host ] +KEY: amqp_host DESC: Defines the AMQP/RabbitMQ broker IP. amqp_* directives refer to the broker used by an - AMQP plugin to purge data out; plugin_pipe_amqp_* directives refer to the broker used - by the core process to send data to plugins. + AMQP plugin to purge data out. DEFAULT: localhost KEY: [ bgp_daemon_msglog_amqp_host | bgp_table_dump_amqp_host | bmp_dump_amqp_host | @@ -431,12 +433,12 @@ directives refer to the broker used by the BMP thread to dump data out at regular time intervals; sfacctd_counter_amqp_* directives refer to the broker used by sfacctd to stream sFlow counter data out; telemetry_daemon_msglog_amqp_* directives refer to the - broker used by the streaming network telemetry thread/daemon to stream data out; - telemetry_dump_amqp_* directives refer to the broker used by the streaming network - telemetry thread/daemon to dump data out at regular time intervals. + broker used by the Streaming Telemetry thread/daemon to stream data out; + telemetry_dump_amqp_* directives refer to the broker used by the Streaming Telemetry + thread/daemon to dump data out at regular time intervals. DEFAULT: See amqp_host -KEY: [ amqp_vhost | plugin_pipe_amqp_vhost ] +KEY: amqp_vhost DESC: Defines the AMQP/RabbitMQ server virtual host; see also amqp_host. DEFAULT: "/" @@ -446,7 +448,7 @@ DESC: See amqp_vhost; see also bgp_daemon_msglog_amqp_host. DEFAULT: See amqp_vhost -KEY: [ amqp_user | plugin_pipe_amqp_user ] +KEY: amqp_user DESC: Defines the username to use when connecting to the AMQP/RabbitMQ server; see also amqp_host. DEFAULT: guest @@ -457,7 +459,7 @@ DESC: See amqp_user; see also bgp_daemon_msglog_amqp_host. DEFAULT: See amqp_user -KEY: [ amqp_passwd | plugin_pipe_amqp_passwd ] +KEY: amqp_passwd DESC: Defines the password to use when connecting to the server; see also amqp_host. DEFAULT: guest @@ -507,6 +509,11 @@ the feature is disabled, meaning all messages are sent to the base AMQP routing key or Kafka topic (or the default one, if no amqp_routing_key or kafka_topic is being specified). + For Kafka it is adviced to create topics in advance with a tool like kafka-topics.sh + (ie. 
"kafka-topics.sh --zookeepeer --topic --create") even + if auto.create.topics.enable is set to true (default) on the broker. This is because + topic creation, especially on distributed systems, may take time and lead to data + loss. DEFAULT: 0 KEY: [ bgp_daemon_msglog_amqp_routing_key_rr | bgp_table_dump_amqp_routing_key_rr | @@ -516,7 +523,7 @@ DESC: See amqp_routing_key_rr; see also bgp_daemon_msglog_amqp_host. DEFAULT: See amqp_routing_key_rr -KEY: [ amqp_exchange | plugin_pipe_amqp_exchange ] +KEY: amqp_exchange DESC: Name of the AMQP exchange to publish data; see also amqp_host. DEFAULT: pmacct @@ -528,8 +535,10 @@ DEFAULT: See amqp_exchange; see also bgp_daemon_msglog_amqp_host. KEY: amqp_exchange_type -DESC: Type of the AMQP exchange to publish data. Currently only 'direct' and 'fanout' types are - supported; see also amqp_host. +DESC: Type of the AMQP exchange to publish data to. 'direct', 'fanout' and 'topic' + types are supported; "rabbitmqctl list_exchanges" can be used to check the + exchange type. Upon mismatch of exchange type, ie. exchange type is 'direct' + but amqp_exchange_type is set to 'topic', an error will be returned. DEFAULT: direct KEY: [ bgp_daemon_msglog_amqp_exchange_type | bgp_table_dump_amqp_exchange_type | @@ -541,13 +550,15 @@ KEY: amqp_persistent_msg VALUES: [ true | false ] -DESC: Marks messages as persistent so that a queue content does not get lost if RabbitMQ restarts. - Note from RabbitMQ docs: "Marking messages as persistent doesn't fully guarantee that a - message won't be lost. Although it tells RabbitMQ to save message to the disk, there is - still a short time window when RabbitMQ has accepted a message and hasn't saved it yet. - Also, RabbitMQ doesn't do fsync(2) for every message -- it may be just saved to cache and - not really written to the disk. The persistence guarantees aren't strong, but it is more - than enough for our simple task queue."; see also amqp_host. +DESC: Marks messages as persistent and sets Exchange as durable so to prevent data loss + if a RabbitMQ server restarts (it will still be consumer responsibility to declare + the queue durable). Note from RabbitMQ docs: "Marking messages as persistent does + not fully guarantee that a message won't be lost. Although it tells RabbitMQ to + save message to the disk, there is still a short time window when RabbitMQ has + accepted a message and hasn't saved it yet. Also, RabbitMQ doesn't do fsync(2) for + every message -- it may be just saved to cache and not really written to the disk. + The persistence guarantees aren't strong, but it is more than enough for our simple + task queue."; see also amqp_host. DEFAULT: false KEY: [ bgp_daemon_msglog_amqp_persistent_msg | bgp_table_dump_amqp_persistent_msg | @@ -585,29 +596,12 @@ DESC: See amqp_heartbeat_interval; see also bgp_daemon_msglog_amqp_host. DEFAULT: See amqp_heartbeat_interval -KEY: plugin_pipe_amqp_routing_key -DESC: Name of the AMQP routing key to use to send data to a plugin. Currently each plugin - must bind to a different routing key in order to avoid duplications. Dynamic names - are supported through the use of variables, which are computed at startup. The list - of variables supported is: - - $core_proc_name Configured core_proc_name. - - $plugin_name Configured plugin name (ie. 'foo' if 'plugins: print[foo]') - - $plugin_type Plugin type (ie. memory, print, amqp, etc.) 
- -DEFAULT: '$core_proc_name-$plugin_name-$plugin_type' - -KEY: plugin_pipe_amqp_retry -DESC: Defines the interval of time, in seconds, after which a connection to the RabbitMQ - server should be retried after a failure is detected; see also amqp_host. -DEFAULT: 60 - KEY: [ bgp_daemon_msglog_amqp_retry | bmp_daemon_msglog_amqp_retry | sfacctd_counter_amqp_retry | telemetry_daemon_msglog_amqp_retry ] [GLOBAL] -DESC: See plugin_pipe_amqp_retry; see also bgp_daemon_msglog_amqp_host. -DEFAULT: See plugin_pipe_amqp_retry +DESC: Defines the interval of time, in seconds, after which a connection to the RabbitMQ + server should be retried after a failure is detected; see also amqp_host. See also + bgp_daemon_msglog_amqp_host. +DEFAULT: 60 KEY: kafka_topic DESC: Name of the Kafka topic to attach to published data. Dynamic names are supported by @@ -622,50 +616,56 @@ $post_tag2 Configured value of post_tag2. -DEFAULT: kafka_topic: 'pmacct.acct' + It is adviced to create topics in advance with a tool like kafka-topics.sh (ie. + "kafka-topics.sh --zookeepeer --topic --create") even if + auto.create.topics.enable is set to true (default) on the broker. This is because + topic creation, especially on distributed systems, may take time and lead to data + loss. + +DEFAULT: 'pmacct.acct' + +KEY: kafka_config_file +DESC: Full pathname to a file containing directives to configure librdkafka. All knobs + whose values are string, integer, boolean, CSV are supported. Pointer values, ie. + for setting callbacks, are currently not supported through this infrastructure. + The syntax of the file is CSV and expected in the format: where + 'type' is one of 'global' or 'topic' and 'key' and 'value' are set according to + librdkafka doc https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md + Both 'key' and 'value' are passed onto librdkafka without any validation being + performed; the 'value' field can also contain commas no problem as it is also not + parsed. Examples are: -KEY: plugin_pipe_kafka_topic -DESC: Name of the Kafka topic to use to send data to a plugin. Currently each plugin must - bind to a different routing key in order to avoid duplications. Dynamic names are - supported through the use of variables, which are computed at startup. The list of - variables supported is: + topic, compression.codec, snappy + global, socket.keepalive.enable, true - $core_proc_name Configured core_proc_name. - - $plugin_name Configured plugin name (ie. 'foo' if 'plugins: print[foo]') - - $plugin_type Plugin type (ie. memory, print, amqp, etc.) - -DEFAULT: '$core_proc_name-$plugin_name-$plugin_type' - -KEY: plugin_pipe_kafka_retry -DESC: Defines the interval of time, in seconds, after which a connection to the Kafka - broker should be retried after a failure is detected. -DEFAULT: 60 +DEFAULT: none -KEY: [ kafka_broker_host | plugin_pipe_broker_host ] +KEY: kafka_broker_host DESC: Defines one or multiple, comma-separated, Kafka brokers. If only a single broker IP address is defined then the broker port is read via the kafka_broker_port config directive (legacy syntax); if multiple brokers are defined then each broker port, if not left to default 9092, is expected as part of this directive, for example: - "broker1:10000,broker2". kafka_* directives refer to the broker used by a kafka - plugin to purge data out; plugin_pipe_kafka_* directives refer to the broker used - by the core process to send data to plugins. + "broker1:10000,broker2". 
When defining multiple brokers, if the host is IPv4, the + value is expected as 'address:port'. If IPv6, it is expected as '[address]:port'. + When defining a single broker, this is not needed as the IPv6 address is detected + and wrapped-around '[' ']' symbols. FQDNs are also accepted. SSL connections can be + configured as "ssl://broker3:9000,ssl://broker2". DEFAULT: 127.0.0.1 -KEY: [ kafka_broker_port | plugin_pipe_broker_port ] +KEY: kafka_broker_port DESC: Defines the Kafka broker port. See also kafka_broker_host. DEFAULT: 9092 -KEY: [ kafka_partition | plugin_pipe_kafka_partition ] +KEY: kafka_partition DESC: Defines the Kafka broker topic partition ID. RD_KAFKA_PARTITION_UA or ((int32_t)-1) is to define the configured or default partitioner (slower than sending to a fixed partition). See also kafka_broker_host. -DEFAULT: 0 +DEFAULT: -1 KEY: kafka_partition_key DESC: Defines the Kafka broker topic partition key. A string of printable characters is expected as value. +DEFAULT: none KEY: [ bgp_daemon_msglog_kafka_broker_host | bgp_table_dump_kafka_broker_host | bmp_daemon_msglog_kafka_broker_host | bmp_dump_kafka_broker_host | @@ -704,7 +704,7 @@ KEY: [ bgp_daemon_msglog_kafka_partition_key | bgp_table_dump_kafka_partition_key | - bmp_daemon_msglog_kafka_partition_key | bmp_dump_kafka_partition | + bmp_daemon_msglog_kafka_partition_key | bmp_dump_kafka_partition_key | sfacctd_counter_kafka_partition_key | telemetry_daemon_msglog_kafka_partition_key | telemetry_dump_kafka_partition_key ] [GLOBAL] @@ -713,8 +713,16 @@ KEY: [ bgp_daemon_msglog_kafka_retry | bmp_daemon_msglog_kafka_retry | sfacctd_counter_kafka_retry | telemetry_daemon_msglog_kafka_retry ] [GLOBAL] -DESC: See plugin_pipe_kafka_retry -DEFAULT: See plugin_pipe_kafka_retry +DESC: Defines the interval of time, in seconds, after which a connection to the Kafka + broker should be retried after a failure is detected. +DEFAULT: 60 + +KEY: [ bgp_daemon_msglog_kafka_config_file | bgp_table_dump_kafka_config_file | + bmp_daemon_msglog_kafka_config_file | bmp_dump_kafka_config_file | + sfacctd_counter_kafka_config_file | telemetry_daemon_msglog_kafka_config_file | + telemetry_dump_kafka_config_file ] [GLOBAL] +DESC: See kafka_config_file +DEFAULT: See kafka_config_file KEY: pidfile (-F) [GLOBAL] DESC: Writes PID of Core process to the specified file. PIDs of the active plugins are written @@ -737,7 +745,9 @@ KEY: networks_file_filter VALUES [ true | false ] DESC: Makes networks_file work as a filter in addition to its basic resolver functionality: - networks and hosts not belonging to defined networks are zeroed out. + networks and hosts not belonging to defined networks are zeroed out. This feature can + interfere with the intended behaviour of networks_no_mask_if_zero, if they are both + set to true. DEFAULT: false KEY: networks_file_no_lpm @@ -765,6 +775,16 @@ networks_file, win instead. DEFAULT: false +KEY: networks_no_mask_if_zero +VALUES [ true | false ] +DESC: If set to true, IP prefixes with zero mask - that is, unknown ones or those hitting a + default route - are not masked (ie. they are applied a full 0xF mask, that is, 32 bits + for IPv4 addresses and 128 bits for IPv6 ones). The feature applies to *_net fields + and makes sure individual IP addresses belonging to unknown IP prefixes are not zeroed + out. This feature can interfere with the intended behaviour of networks_file_filter, + if they are both set to true. 
+DEFAULT: false + KEY: networks_mask DESC: Specifies the network mask - in bits - to apply to IP address values in L3 header. The mask is applied sistematically and before evaluating the 'networks_file' content (if @@ -793,11 +813,10 @@ directive refers to the full path to the database file DEFAULT: 'pmacct'; SQLite 3.x: '/tmp/pmacct.db' -KEY: [ sql_table | print_output_file | mongo_table ] -DESC: In SQL and mongodb plugins this defines the table to use; in print plugin it defines the - file to write output to. Dynamic names are supported through the use of variables, which - are computed at the moment when data is purged to the backend. The list of supported - variables follows: +KEY: [ sql_table | print_output_file ] +DESC: In SQL this defines the table to use; in print plugin it defines the file to write output + to. Dynamic names are supported through the use of variables, which are computed at the + moment when data is purged to the backend. The list of supported variables follows: %d The day of the month as a decimal number (range 01 to 31). @@ -977,38 +996,31 @@ configuration switch deprecated. DEFAULT: typed -KEY: [ sql_host | mongo_host ] +KEY: sql_host DESC: Defines the backend server IP/hostname DEFAULT: localhost -KEY: [ sql_user | mongo_user ] -DESC: Defines the username to use when connecting to the server. In MongoDB, if both - mongo_user and mongo_passwd directives are omitted, authentication is disabled; - if only one of the two is specified, the other is set to its default value. +KEY: sql_user +DESC: Defines the username to use when connecting to the server. DEFAULT: pmacct -KEY: [ sql_passwd | mongo_passwd ] -DESC: Defines the password to use when connecting to the server. In MongoDB, if both - mongo_user and mongo_passwd directives are omitted, authentication is disabled; - if only one of the two is specified, the other is set to its default value. +KEY: sql_passwd +DESC: Defines the password to use when connecting to the server. DEFAULT: 'arealsmartpwd' -KEY: [ sql_refresh_time | print_refresh_time | mongo_refresh_time | amqp_refresh_time | - kafka_refresh_time ] (-r) +KEY: [ sql_refresh_time | print_refresh_time | amqp_refresh_time | kafka_refresh_time ] (-r) DESC: Time interval, in seconds, between consecutive executions of the plugin cache scanner. The scanner purges data into the plugin backend. Note: internally all these config directives write to the same variable; when using multiple plugins it is recommended to bind refresh time definitions to specific plugins, ie.: - plugins: mysql[x], mongodb[y] + plugins: mysql[x] sql_refresh_time[x]: 900 - mongo_refresh_time[y]: 300 As doing otherwise can originate unexpected behaviours. DEFAULT: 60 -KEY: [ sql_startup_delay | print_startup_delay | mongo_startup_delay | amqp_startup_delay | - kafka_startup_delay ] +KEY: [ sql_startup_delay | print_startup_delay | amqp_startup_delay | kafka_startup_delay ] DESC: Defines the time, in seconds, the first cache scan event has to be delayed. This delay is, in turn, propagated to the subsequent scans. It comes useful in two scenarios: a) so that multiple plugins can use the same refresh time (ie. sql_refresh_time) value, allowing @@ -1029,7 +1041,7 @@ the 'sql_table_version' directive. DEFAULT: false -KEY: [ sql_history | print_history | mongo_history | amqp_history | kafka_history ] +KEY: [ sql_history | print_history | amqp_history | kafka_history ] VALUES: #[s|m|h|d|w|M] DESC: Enables historical accounting by placing accounted data into configurable time-bins. 
It will use the 'stamp_inserted' (base time of the time-bin) and 'stamp_updated' (last time @@ -1040,19 +1052,18 @@ setting nfacctd_pro_rating to true. Note that this value is fully disjoint from the *_refresh_time directives which set the time intervals at which data has to be written to the backend instead. The final effect is close to time slots in a RRD file. Examples of - valid values are: '300' or '5m' - five minutes, '3600' or '1h' - one hour, '14400' or '4h' - - four hours, '86400' or '1d' - one day, '1w' - one week, '1M' - one month). + valid values are: '300s' or '5m' - five minutes, '3600s' or '1h' - one hour, '14400s' or + '4h' - four hours, '86400s' or '1d' - one day, '1w' - one week, '1M' - one month). DEFAULT: none -KEY: [ sql_history_offset | print_history_offset | mongo_history_offset | amqp_history_offset | - kafka_history_offset ] +KEY: [ sql_history_offset | print_history_offset | amqp_history_offset | kafka_history_offset ] DESC: Sets an offset to timeslots basetime. If history is set to 30 mins (by default creating 10:00, 10:30, 11:00, etc. time-bins), with an offset of 900 seconds (so 15 mins) it will create 10:15, 10:45, 11:15, etc. time-bins. It expects a positive value, in seconds. DEFAULT: 0 -KEY: [ sql_history_roundoff | print_history_roundoff | mongo_history_roundoff | - amqp_history_roundoff | kafka_history_roundoff ] +KEY: [ sql_history_roundoff | print_history_roundoff | amqp_history_roundoff | + kafka_history_roundoff ] VALUES [m,h,d,w,M] DESC: Enables alignment of minutes (m), hours (h), days of month (d), weeks (w) and months (M) in print (to print_refresh_time) and SQL plugins (to sql_history and sql_refresh_time). @@ -1074,8 +1085,7 @@ system on a box) to use in the case the primary backend fails. DEFAULT: none -KEY: [ sql_max_writers | print_max_writers | mongo_max_writers | amqp_max_writers | - kafka_max_writers ] +KEY: [ sql_max_writers | print_max_writers | amqp_max_writers | kafka_max_writers ] DESC: Sets the maximum number of concurrent writer processes the plugin is allowed to start. This setting allows pmacct to degrade gracefully during major backend lock/outages/ unavailability. The value is split as follows: up to N-1 concurrent processes will @@ -1084,8 +1094,7 @@ (so, data will be lost at this stage) and an error message is printed out. DEFAULT: 10 -KEY: [ sql_cache_entries | print_cache_entries | mongo_cache_entries | amqp_cache_entries | - kafka_cache_entries ] +KEY: [ sql_cache_entries | print_cache_entries | amqp_cache_entries | kafka_cache_entries ] DESC: All plugins have a memory cache in order to store data until next purging event (see refresh time directives, ie. sql_refresh_time). In case of network traffic data, the cache allows to accumulate bytes and packets counters. This directive sets the number @@ -1123,8 +1132,8 @@ MPLS-related primitives; all these can make the total cache memory size increase slightly at runtime. -DEFAULT: sql_cache_entries: 32771; print_cache_entries, mongo_cache_entries, amqp_cache_entries, - kafka_cache_entries: 16411 +DEFAULT: print_cache_entries, amqp_cache_entries, kafka_cache_entries: 16411; + sql_cache_entries: 32771 KEY: sql_dont_try_update VALUES: [ true | false ] @@ -1159,19 +1168,20 @@ applies only to MySQL and SQLite 3.x plugins. Inserting many rows at the same time is much faster (many times faster in some cases) than using separate single-row INSERT statements. It's adviceable to check the size of this pmacct buffer against the size of the corresponding - MySQL buffer (max_allowed_packet). 
In AMQP plugin, amqp_multi_values enables similar feature: - the value is intended as the amount of elements to pack in each JSON array. + MySQL buffer (max_allowed_packet). In AMQP and Kafka plugins, [amqp|kafka]_multi_values allow + the same with JSON serialization (for Avro see avro_buffer_size); in this case data is encoded + in JSON objects newline-separated (preferred to JSON arrays for performance). DEFAULT: 0 -KEY: [ sql_trigger_exec | print_trigger_exec | mongo_trigger_exec ] +KEY: [ sql_trigger_exec | print_trigger_exec | amqp_trigger_exec | kafka_trigger_exec ] DESC: Defines the executable to be launched at fixed time intervals to post-process aggregates; in SQL plugins, intervals are specified by the 'sql_trigger_time' directive; if no interval is supplied 'sql_refresh_time' value is used instead: this will result in a trigger being fired each purging event. A number of environment variables are set in order to allow the trigger to take actions; take a look to docs/TRIGGER_VARS to check them out. In the print - and mongodb plugins a simpler implementation is made: triggers can be fired each time data - is written to the backend (ie. print_refresh_time) and no environment variables are passed - over to the executable. + plugin a simpler implementation is made: triggers can be fired each time data is written to + the backend (ie. print_refresh_time) and no environment variables are passed over to the + executable. DEFAULT: none KEY: sql_trigger_time @@ -1183,7 +1193,7 @@ the executable will be fired each hour). DEFAULT: none -KEY: [ sql_preprocess | print_preprocess | mongo_preprocess | amqp_preprocess | kafka_preprocess ] +KEY: [ sql_preprocess | print_preprocess | amqp_preprocess | kafka_preprocess ] DESC: Allows to process aggregates (via a comma-separated list of conditionals and checks) while purging data to the backend thus resulting in a powerful selection tier; aggregates filtered out may be just discarded or saved through the recovery mechanism (if enabled, if supported @@ -1293,8 +1303,7 @@ the following line can be used to instrument the print plugin: 'print_preprocess: minb=100000'. DEFAULT: none -KEY: [ sql_preprocess_type | print_preprocess_type | mongo_preprocess_type | amqp_preprocess_type | - kafka_preprocess_type ] +KEY: [ sql_preprocess_type | print_preprocess_type | amqp_preprocess_type | kafka_preprocess_type ] VALUES: [ any | all ] DESC: When more checks are to be evaluated, this directive tells whether aggregates on the queue are valid if they just match one of the checks (any) or all of them (all). @@ -1312,35 +1321,25 @@ VALUES [ true | false ] DESC: All timestamps (ie. timestamp_start, timestamp_end, timestamp_arrival primitives; sql_history- related fields stamp_inserted, stamp_updated; etc.) in the standard seconds since the Epoch - format. In case the output is to a RDBMS, setting this directive to true will require changes - to the default types for timestamp fields in the SQL schema. + format. This not only makes output more compact but also prevents computationally expensive + time-formatting functions to be invoked, resulting in speed gains at purge time. In case the + output is to a RDBMS, setting this directive to true will require changes to the default types + for timestamp fields in the SQL schema. 
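To make the trigger mechanism described above concrete, a hypothetical MySQL plugin fragment
could look as follows (the script path is a placeholder); if sql_trigger_time were omitted,
the script would instead run at every sql_refresh_time purge event.

  ! illustrative sketch: purge every 5 minutes, fire a post-processing trigger hourly
  plugins: mysql
  sql_refresh_time: 300
  sql_trigger_time: 1h
  sql_trigger_exec: /path/to/post_process.sh
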
MySQL: DATETIME ==> INT(8) UNSIGNED PostgreSQL: timestamp without time zone ==> bigint SQLite3: DATETIME ==> INT(8) DEFAULT: false -KEY: mongo_insert_batch -DESC: When purging data in a MongoDB database, defines the amount of elements to be inserted per - batch. This value depends on available memory: with 8GB RAM a max 35000 value did work OK; - with 16GB RAM a max 75000 value did work OK instead. -DEFAULT: 10000 - -KEY: mongo_indexes_file -DESC: Full pathname to a file containing a list of indexes to apply to a MongoDB collection with - dynamic name. If the collection does not exists, it is created. Index names are picked by - MongoDB. For example, to create collections with two indexes 1) one using as key source/ - destination IP addresses and 2) the other using source/destination TCP/UDP ports compile - the file pointed by this directive as: - - src_host, dst_host - src_port, dst_port - KEY: [ print_markers | amqp_markers | kafka_markers ] VALUES: [ true | false ] DESC: Enables the use of start/end markers each time data is purged to the backend. Both start and end markers return additional information, ie. writer PID, number of entries purged, - elapsed time, etc. + elapsed time, etc. When plugin output is in JSON or Avro plugin outputs, markers are + encoded in JSON format and event_type is set to purge_init and purge_close respectively. + In the case of Kafka topics with multiple partitions, the purge_close message can arrive + out of order so other mechanisms should be used to correlate messages as being part of + the same batch (ie. writer_id). DEFAULT: false KEY: print_output @@ -1391,15 +1390,33 @@ [amqp, kafka]_multi_values configuration directive, the current records stored in the buffer will be sent to the message broker and the buffer will be cleared to accomodate subsequent records. -DEFAULT: 4096 +DEFAULT: 8192 KEY: avro_schema_output_file DESC: When the Avro format is used to encode the messages sent to a message broker (amqp and kafka - plugins), this option causes the schema used to encode the messages to be dumped to the file path - given. The schema can then be used by the receiving end to decode the messages. Note that the - schema will be dynamically built based on the aggregation primitives chosen. + plugins), this option causes the schema used to encode the messages to be dumped to the file + path given. The schema can then be used by the receiving end to decode the messages. Note + that the schema will be dynamically built based on the aggregation primitives chosen. This + has also effect in the print plugin but in this case the schema is also always included in + the print_output_file as mandated by Avro specification. + +KEY: [ amqp_avro_schema_routing_key | kafka_avro_schema_topic ] +DESC: AMQP routing key or Kafka topic on which the generated Avro schema is sent over at regular + time intervals by AMQP and Kafka plugins (it can potentially be the same as kafka_topic or + amqp_routing_key). The schema can then be used by the receiving end to decode the messages. + All other parameters to connect to the broker, ie. host, port, etc. are shared with the main + plugin routing key or topic. The time intervals are set via amqp_avro_schema_refresh_time + and kafka_avro_schema_refresh_time. Schemas are carried as part of the 'schema' field in + an envelope JSON message with 'event_type' set to purge_schema. 
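Tying together the marker and Avro schema directives above, a minimal, hypothetical Kafka
plugin sketch might look like the following (topic names and broker address are placeholders;
kafka_broker_host and kafka_output are assumed from the usual Kafka plugin set-up).

  ! illustrative sketch: Avro output, schema re-published every 60s, purge markers enabled
  plugins: kafka
  kafka_broker_host: 127.0.0.1
  kafka_output: avro
  kafka_topic: pmacct.acct
  kafka_avro_schema_topic: pmacct.acct.schema
  kafka_avro_schema_refresh_time: 60
  kafka_markers: true
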
+DEFAULT: none + + +KEY: [ amqp_avro_schema_refresh_time | kafka_avro_schema_refresh_time ] +DESC: Time interval, in seconds, at which the generated Avro schema is sent over the configured + AMQP routing key (amqp_avro_schema_routing_key) or Kafka topic (kafka_avro_schema_topic). +DEFAULT: 60 -KEY: [ print_num_protos | sql_num_protos | amqp_num_protos | mongo_num_protos | kafka_num_protos ] +KEY: [ print_num_protos | sql_num_protos | amqp_num_protos | kafka_num_protos ] VALUES: [ true | false ] DESC: Defines whether IP protocols (ie. tcp, udp) should be looked up and presented in string format or left numerical. The default is to look protocol names up. @@ -1410,9 +1427,11 @@ DESC: Defines whether IP addresses should be left numerical (in network bytes ordering) or converted into human-readable strings. Applies to MySQL and SQLite plugins only and assumes the INET_ATON() and INET6_ATON() function are defined in the RDBMS. INET_ATON() is always defined in MySQL whereas - INET6_ATON() requires MySQL >= 5.6.3. Bothfunctions are not defined by default in SQLite instead. - The feature is not compatible with making use of IP prefix labels. Default setting is to convert - IP addresses and prefixes into strings. + INET6_ATON() requires MySQL >= 5.6.3. Both functions are not defined by default in SQLite instead + and are to be user-defined: if pmacct is compiled with --disable-ipv6, a INET_ATON() function is + invoked; if pmacct is compiled with --enable-ipv6 (default), a INET6_ATON() function is invoked. + The feature is not compatible with making use of IP prefix labels. Default setting, false, is to + convert IP addresses and prefixes into strings. DEFAULT: false KEY: [ nfacctd_port | sfacctd_port ] (-l) [GLOBAL, NO_PMACCTD, NO_UACCTD] @@ -1448,12 +1467,12 @@ for secs and msecs timestamps, increasing collector awareness. DEFAULT: false -KEY: nfacctd_time_new [GLOBAL, NFACCTD_ONLY] +KEY: [ nfacctd_time_new | pmacctd_time_new | sfacctd_time_new ] [GLOBAL, NO_UACCTD] VALUES: [ true | false ] -DESC: Makes 'nfacctd' to ignore timestamps included in NetFlow header and build new ones. This gets - particularly useful to assign flows to time-bins based on the flow arrival time at the collector - rather than the flow start time. An application for it is when historical accounting is enabled - ('sql_history') and an INSERT-only mechanism is in use ('sql_dont_try_update', 'sql_use_copy'). +DESC: Makes the daemon to ignore external timestamps associated to data, ie. included in NetFlow + header or pcap header, and generate new ones (reflecting data arrival time to the collector). + This gets particularly useful to assign flows to time-bins based on the flow arrival time at + the collector rather than the flow original (start) time. DEFAULT: false KEY: nfacctd_pro_rating [NFACCTD_ONLY] @@ -1469,6 +1488,17 @@ (nfacctd_renormalize set to true). DEFAULT: false +KEY: nfacctd_templates_file [NFACCTD_ONLY] +DESC: Full pathname to a file containing serialized templates data from previous nfacctd use. + Templates are loaded from this file when nfacctd is (re)started in order to reduce the + amount of dropped packets due to unknown templates. Be aware that this file will be + written to with possible new templates and updated versions of provided ones. Hence, an + empty file can be specified and incoming templates will be cached into it. This file + will be created if it does not exist. 
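As a small example of the generalised *_time_new behaviour documented above, the sketch below
(untested, illustrative values) makes pmacctd bin traffic on arrival time at the collector
rather than on the timestamps carried in the pcap header.

  ! illustrative sketch: time-bin on arrival time rather than on pcap timestamps
  plugins: print
  print_history: 5m
  print_history_roundoff: m
  pmacctd_time_new: true
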
Only JSON format is currently supported and + requires compiling against Jansson library (--enable-jansson when configuring for + compiling). +DEFAULT: none + KEY: [ nfacctd_stitching | sfacctd_stitching | pmacctd_stitching | uacctd_stitching ] VALUES: [ true | false ] DESC: If set to true adds two new fields, timestamp_min and timestamp_max: given an aggregation @@ -1564,6 +1594,13 @@ and notes). DEFAULT: true +KEY: nfacctd_disable_opt_scope_check [GLOBAL, ONLY_NFACCTD] +VALUES: [ true | false ] +DESC: Mainly a workaround to implementations not encoding NetFlow v9/IPIFX option scope correctly, + this knob allows to disable option scope checking. By doing so, options are considered scoped + to the system level (ie. to the IP address of the expoter). +DEFAULT: false + KEY: pre_tag_map [MAP] DESC: Full pathname to a file containing tag mappings. Tags can be internal-only (ie. for filtering purposes, see pre_tag_filter configuration directive) or exposed to users (ie. if 'tag', 'tag2' @@ -1571,71 +1608,74 @@ sub-tree for all supported keys and detailed examples (pretag.map.example). Pre-Tagging is evaluated in the Core Process and each plugin can be defined a local pre_tag_map. Result of evaluation of pre_tag_map overrides any tags passed via NetFlow/sFlow by a pmacct nfprobe/ - sfprobe plugin. + sfprobe plugin. Number of map entries (by default 384) can be modified via maps_entries. + Content can be reloaded at runtime by sending the daemon a SIGUSR2 signal (ie. "killall -USR2 + nfacctd"). DEFAULT: none KEY: maps_entries DESC: Defines the maximum number of entries a map (ie. pre_tag_map and all directives with the - 'MAP' flag) can contain. The default value is suitable for most scenarios, though tuning it - could be required either to save on memory or to allow for more entries. Refer to the - specific map directives documentation in this file to see which are affected by this setting. + 'MAP' flag in this document) can contain. The default value is suitable for most scenarios, + though tuning it could be required either to save on memory or to allow for more entries. + Refer to the specific map directives documentation in this file to see which are affected by + this setting. DEFAULT: 384 KEY: maps_row_len -DESC: Defines the maximum length of map (ie. pre_tag_map and all directives with the 'MAP' flag) - rows. The default value is suitable for most scenario, though tuning it could be required - either to save on memory or to allow for more entries. +DESC: Defines the maximum length of map (ie. pre_tag_map and all directives with the 'MAP' flag in + this document) rows. The default value is suitable for most scenario, though tuning it could + be required either to save on memory or to allow for more entries. DEFAULT: 256 KEY: maps_refresh [GLOBAL] VALUES: [ true | false ] DESC: When enabled, this directive allows to reload map files (ie. pre_tag_map and all directives - with the 'MAP' flag) without restarting the daemon instance. For example, it may result - particularly useful to reload pre_tag_map or networks_file entries in order to reflect some - change in the network. After having modified the map files, a SIGUSR2 has to be sent (e.g.: - in the simplest case "killall -USR2 pmacctd") to the daemon to notify the change. If such - signal is sent to the daemon and this directive is not enabled, the signal is silently - discarded. The Core Process is in charge of processing the Pre-Tagging map; plugins are - devoted to Networks and Ports maps instead. 
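For the two nfacctd knobs just documented, a hypothetical collector configuration could look
like this (the file path is a placeholder; an empty or non-existent file is fine since it
will be created and populated with cached templates over time).

  ! illustrative sketch: persist NetFlow v9/IPFIX templates across restarts and
  ! relax option scope checking for non-compliant exporters
  nfacctd_port: 2100
  nfacctd_templates_file: /path/to/nfacctd_templates.json
  nfacctd_disable_opt_scope_check: true
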
Then, because signals can be sent either to the - whole daemon (killall) or to just a specific process (kill), this mechanism also offers the - advantage to elicit local reloads. + with the 'MAP' flag in this document) without restarting the daemon instance. For example, + it may result particularly useful to reload pre_tag_map or networks_file entries in order + to reflect some change in the network. After having modified the map files, a SIGUSR2 has + to be sent (e.g.: in the simplest case "killall -USR2 pmacctd") to the daemon to notify the + change. If such signal is sent to the daemon and this directive is not enabled, the signal + is silently discarded. The Core Process is in charge of processing the Pre-Tagging map; + plugins are devoted to Networks and Ports maps instead. Then, because signals can be sent + either to the whole daemon (killall) or to just a specific process (kill), this mechanism + also offers the advantage to elicit local reloads. DEFAULT: true KEY: maps_index [GLOBAL] VALUES: [ true | false ] -DESC: Enables indexing of maps (ie. pre_tag_map and all directives with the 'MAP' flag) to - increase lookup speeds on large maps and/or sustained lookup rates. Indexes are automatically - defined basing on structure and content of the map, up to a maximum of 8. Indexing of - pre_tag_map, bgp_peer_src_as_map, flow_to_rd_map is supported. Only a sub-set of pre_tag_map - fields are supported, including: ip, bgp_nexthop, vlan, cvlan, src_mac, mpls_vpn_rd, - src_as, dst_as, peer_src_as, peer_dst_as, input, output. Only IP addresses, ie. no IP prefixes, - are supported as part of the 'ip' field. Also, negations are not supported (ie. 'in=-216' match - all but input interface 216). bgp_agent_map and sampling_map implement a separate caching - mechanism and hence do not leverage this feature. Duplicates in the key part of the map - entry, key being defined as all fields except set_* ones, are not supported and may result - in a "out of index space" message. +DESC: Enables indexing of maps (ie. pre_tag_map and all directives with the 'MAP' flag in this + document) to increase lookup speeds on large maps and/or sustained lookup rates. Indexes + are automatically defined basing on structure and content of the map, up to a maximum of + 8. Indexing of pre_tag_map, bgp_peer_src_as_map, flow_to_rd_map is supported. Only a sub- + set of pre_tag_map fields are supported, including: ip, bgp_nexthop, vlan, cvlan, src_mac, + mpls_vpn_rd, src_as, dst_as, peer_src_as, peer_dst_as, input, output. Only IP addresses, + ie. no IP prefixes, are supported as part of the 'ip' field. Also, negations are not + supported (ie. 'in=-216' match all but input interface 216). bgp_agent_map and sampling_map + implement a separate caching mechanism and hence do not leverage this feature. Duplicates + in the key part of the map entry, key being defined as all fields except set_* ones, are + not supported and may result in a "out of index space" message. DEFAULT: false KEY: pre_tag_filter, pre_tag2_filter [NO_GLOBAL] VALUES: [ 0-2^64-1 ] DESC: Expects one or more tags (when multiple tags are supplied, they need to be comma separated and a logical OR is used in the evaluation phase) as value and allows to filter aggregates - basing upon their tag value: in case of a match, the aggregate is delivered to the plugin. - This directive has to be bound to a plugin (that is, it cannot be global) and is suitable, - for example, to split tagged data among the active plugins. 
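Putting the map-related directives above together, a hypothetical set-up with a larger
Pre-Tagging map that is reloadable at runtime might be (path and size are placeholders):

  ! illustrative sketch: larger pre_tag_map, reloadable at runtime via SIGUSR2
  pre_tag_map: /path/to/pretag.map
  maps_entries: 1024
  maps_refresh: true
  ! after editing the map: killall -USR2 nfacctd
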
While tags themselves need to be - positive values, this directive also allows to specify a tag value '0' to intercept untagged + basing upon their tag (or tag2) value: in case of a match, the aggregate is filtered in, ie. + it is delivered to the plugin it is attached to. This directive has to be attached to a + plugin (that is, it cannot be global) and is suitable, for example, to split tagged data + among the active plugins. This directive also allows to specify a value '0' to match untagged data, thus allowing to split tagged traffic from untagged one. It also allows negations by pre-pending a minus sign to the tag value (ie. '-6' would send everything but traffic tagged - as '6' to the plugin it is bound to) and ranges (ie. '10-20' would send over traffic tagged - in the range 10..20) and any combination of these. This directive makes sense if coupled with - 'pre_tag_map'; it could be used in conjunction with 'aggregate_filter'. + as '6' to the plugin it is attached to, hence achieving a filter out behaviour) and ranges + (ie. '10-20' would send over traffic tagged in the range 10..20) and any combination of these. + This directive makes sense if coupled with 'pre_tag_map'. DEFAULT: none KEY: pre_tag_label_filter [NO_GLOBAL] DESC: Expects one or more labels (when multiple labels are supplied, they need to be comma - separated and a logical OR is used in the evaluation phase) as value and allows to filter - aggregates basing upon their label value: only in case of match data is delivered to the - plugin. This directive has to be bound to a plugin (that is, it cannot be global). Null + separated and a logical OR is used in the evaluation phase) as value and allows to filter in + aggregates basing upon their label value(s): only in case of match data is delivered to the + plugin. This directive has to be attached to a plugin (that is, it cannot be global). Null label values (ie. unlabelled data) can be matched using the 'null' keyword. Negations are allowed by pre-pending a minus sign to the label value. The use of this directive makes sense if coupled with 'pre_tag_map'. @@ -1665,7 +1705,9 @@ enabled (nfacctd_renormalize or sfacctd_renormalize set to true) in order for the feature to work. If a specific router is not defined in the map, the sampling rate advertised by the router itself is applied. Take a look to the examples/ sub-tree 'sampling.map.example' for all - supported keys and detailed examples. + supported keys and detailed examples. Number of map entries (by default 384) can be modified + via maps_entries. Content can be reloaded at runtime by sending the daemon a SIGUSR2 signal + (ie. "killall -USR2 nfacctd"). DEFAULT: none KEY: [ pmacctd_force_frag_handling | uacctd_force_frag_handling ] [GLOBAL, NO_NFACCTD, NO_SFACCTD] @@ -1757,20 +1799,6 @@ compiling). DEFAULT: json -KEY: classifiers [GLOBAL, NO_NFACCTD, NO_SFACCTD] -DESC: Full path to a spool directory containing the packet classification patterns (expected as .pat - or .so files; files with different extensions and subdirectories will be just ignored). This - feature enables packet/flow classification against application layer data (that is, the packet - payload) and based either over regular expression (RE) patterns (.pat) or external/pluggable C - modules (.so). Patterns are loaded in filename alphabetic order and will be evaluated in the - same order while classifying packets. 
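As an example of using pre_tag_filter, documented above, to split tagged data among plugins
(using the usual bracketed plugin-instance syntax; tag values are purely illustrative):

  ! illustrative sketch: deliver tags 10-20 to one print instance, untagged data to another
  plugins: print[tagged], print[untagged]
  pre_tag_map: /path/to/pretag.map
  pre_tag_filter[tagged]: 10-20
  pre_tag_filter[untagged]: 0
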
Supported RE patterns are those from the great L7-filter - project, which is a new packet classifier for Linux kernel, and are avilable for download at: - http://sourceforge.net/projects/l7-filter/ (then point to the Protocol definitions archive). - Existing SO patterns are available at: http://www.pmacct.net/classification/ . - This configuration directive should be specified whenever the 'class' aggregation method is in - use (ie. 'aggregate: class'). It's supported only by pmacctd. -DEFAULT: none - KEY: sql_aggressive_classification VALUES: [ true | false ] DESC: Usually 5 to 10 packets are required to classify a stream by the 'classifiers' feature. Until @@ -1779,7 +1807,7 @@ they are still cached by the SQL plugin. This directive delays 'unknown' streams - but only those which would have still chances to be correctly classified - from being purged to the DB but only for a small number of consecutive sql_refresh_time slots. It is incompatible with - sql_dont_try_update and sql_use_copy directives. + sql_dont_try_update and sql_use_copy directives. This feature/directive is being phased-out. DEFAULT: false KEY: sql_locking_style @@ -1796,17 +1824,6 @@ recommended since serialization allows to contain database load. DEFAULT: table -KEY: classifier_tentatives [GLOBAL, NO_NFACCTD, NO_SFACCTD] -DESC: Number of tentatives to classify a stream. Usually 5 "full" (ie. carrying payload) packets are - sufficient to classify an uni-directional flow. This is the default value. However classifiers - not basing on the payload content may require a different (maybe larger) number of tentatives. -DEFAULT: 5 - -KEY: classifier_table_num [GLOBAL, NO_NFACCTD, NO_SFACCTD] -DESC: The maximum number of classifiers (SO + RE) that could be loaded runtime. The default number is - usually ok, but some "dirty" uses of classifiers might require more entries. -DEFAULT: 256 - KEY: nfprobe_timeouts DESC: Allows to tune a set of timeouts to be applied over collected packets. The value is expected in the following form: 'name=value:name=value:...'. The set of supported timeouts and their default @@ -1833,7 +1850,7 @@ KEY: nfprobe_receiver DESC: Defines the remote IP address/hostname and port to which NetFlow dagagrams are to be exported. - The value is expected to be in the usual form 'address:port'. + If IPv4, the value is expected as 'address:port'. If IPv6, it is expected as '[address]:port'. DEFAULT: 127.0.0.1:2100 KEY: nfprobe_source_ip @@ -1954,6 +1971,13 @@ to "1.2.3.4". DEFAULT: 1.2.3.4 +KEY: bgp_daemon_as [GLOBAL] +DESC: Defines the BGP Local AS to the supplied value. By default, no value supplied, the session + will be setup as iBGP with the Local AS received from the remote peer being copied back in + the BGP OPEN reply. This allows to explicitely set a Local AS which could be different from + the remote peer one hence establishing an eBGP session. +DEFAULT: none + KEY: [ bgp_daemon_port | bmp_daemon_port ] [GLOBAL] DESC: Binds the BGP/BMP daemon to a port different from the standard port. Default port for BGP is 179/tcp; default port for BMP is 1790. @@ -1987,9 +2011,9 @@ DEFAULT: 0 KEY: [ bgp_daemon_msglog_file | bmp_daemon_msglog_file | telemetry_daemon_msglog_file ] [GLOBAL] -DESC: Enables streamed logging of BGP tables/BMP events/streaming network telemetry data. Each log - entry features a time reference, peer/exporter IP address, event type and a sequence number - (to order events when time reference is not granular enough). 
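To illustrate the bgp_daemon_as directive above, the following hypothetical fragment (ASN is
a placeholder; bgp_daemon being the usual directive that enables the BGP thread) sets an
explicit Local AS so that peerings come up as eBGP instead of the default iBGP behaviour.

  ! illustrative sketch: explicit Local AS, hence eBGP sessions with remote peers
  bgp_daemon: true
  bgp_daemon_port: 179
  bgp_daemon_as: 65001
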
BGP UPDATE messages also contain +DESC: Enables streamed logging of BGP tables/BMP events/Streaming Telemetry data. Each log entry + features a time reference, peer/exporter IP address, event type and a sequence number (to + order events when time reference is not granular enough). BGP UPDATE messages also contain full prefix and BGP attributes information. The list of supported filename variables follows: $peer_src_ip BGP/BMP peer IP address. @@ -2001,8 +2025,8 @@ [GLOBAL] VALUES: [ json ] DESC: Defines output format for the streamed logging of BGP/BMP messages and events/streaming - network telemetry. Only JSON format is currently supported and requires compiling against - Jansson library (--enable-jansson when configuring for compiling). + telemetry. Only JSON format is currently supported and requires compiling against Jansson + library (--enable-jansson when configuring for compiling). DEFAULT: json KEY: bgp_aspath_radius [GLOBAL] @@ -2051,8 +2075,8 @@ DESC: Full pathname to a file containing source peer AS mappings. The AS can be mapped to one or a combination of: ifIndex, source MAC address and BGP next-hop (query against the BGP RIB to look up the source IP prefix). This is sufficient to model popular tecniques for both - public and private BGP peerings. Number of map entries (by default 384) can be modified via - maps_entries. Sample map in 'examples/peers.map.example'. + public and private BGP peerings. Sample map in 'examples/peers.map.example'. Content can + be reloaded at runtime by sending the daemon a SIGUSR2 signal (ie. "killall -USR2 nfacctd"). DEFAULT: none KEY: bgp_src_std_comm_type [GLOBAL] @@ -2069,6 +2093,13 @@ which is often not the case, affecting their accuracy. DEFAULT: none +KEY: bgp_src_lrg_comm_type [GLOBAL] +VALUES: [ bgp ] +DESC: Defines the method to use to map incoming traffic to a set of large communities. Only + native BGP RIB lookups are currently supported. BGP lookups assume traffic is symmetric, + which is often not the case, affecting their accuracy. +DEFAULT: none + KEY: bgp_src_as_path_type [GLOBAL] VALUES: [ bgp ] DESC: Defines the method to use to map incoming traffic to an AS-PATH. Only native BGP RIB lookups @@ -2086,8 +2117,9 @@ KEY: bgp_src_local_pref_map [GLOBAL, MAP] DESC: Full pathname to a file containing source local preference mappings. The LP value can be mapped to one or a combination of: ifIndex, source MAC address and BGP next-hop (query - against the BGP RIB to look up the source IP prefix). Number of map entries (by default - 384) can be modified via maps_entries. Sample map in 'examples/lpref.map.example'. + against the BGP RIB to look up the source IP prefix). Sample map in 'examples/ + lpref.map.example'. Content can be reloaded at runtime by sending the daemon a SIGUSR2 + signal (ie. "killall -USR2 nfacctd"). DEFAULT: none KEY: bgp_src_med_type [GLOBAL] @@ -2100,9 +2132,9 @@ KEY: bgp_src_med_map [GLOBAL, MAP] DESC: Full pathname to a file containing source MED (Multi Exit Discriminator) mappings. The MED value can be mapped to one or a combination of: ifIndex, source MAC address and BGP - next-hop (query against the BGP RIB to look up the source IP prefix). Number of map - entries (by default 384) can be modified via maps_entries. Sample map in 'examples/ - med.map.example'. + next-hop (query against the BGP RIB to look up the source IP prefix). Sample map in + 'examples/med.map.example'. Content can be reloaded at runtime by sending the daemon a + SIGUSR2 signal (ie. "killall -USR2 nfacctd"). 
DEFAULT: none KEY: bgp_agent_map [GLOBAL, MAP] @@ -2113,7 +2145,8 @@ uacctd daemons are required to use a bgp_agent_map with up to two "catch-all" entries - working in a primary/backup fashion (see agent_to_peer.map in the examples section): this is because these daemons do not have a NetFlow/sFlow source address to match to. - Number of map entries (by default 384) can be modified via maps_entries. + Number of map entries (by default 384) can be modified via maps_entries. Content can be + reloaded at runtime by sending the daemon a SIGUSR2 signal (ie. "killall -USR2 nfacctd"). DEFAULT: none KEY: flow_to_rd_map [GLOBAL, MAP] @@ -2121,7 +2154,8 @@ b) MPLS bottom label, BGP next-hop couples) to BGP/MPLS Virtual Private Network (VPN) Route Distinguisher (RD), based upon rfc4659. See flow_to_rd.map file in the examples section for further info. Number of map entries (by default 384) can be modified via - maps_entries. + maps_entries. Content can be reloaded at runtime by sending the daemon a SIGUSR2 signal + (ie. "killall -USR2 nfacctd"). DEFAULT: none KEY: bgp_follow_default [GLOBAL] @@ -2220,11 +2254,11 @@ DEFAULT: path_id KEY: [ bgp_table_dump_file | bmp_dump_file | telemetry_dump_file ] [GLOBAL] -DESC: Enables dump of BGP tables/BMP events/streaming network telemetry data at regular time +DESC: Enables dump of BGP tables/BMP events/Streaming Telemetry data at regular time intervals (as defined by, for example, bgp_table_dump_refresh_time) into files. Each dump event features a time reference and peer/exporter IP address along with the - rest of BGP/BMP/streaming network telemetry data. The list of supported filename - variables follows: + rest of BGP/BMP/Streaming Telemetry data. The list of supported filename variables + follows: %d The day of the month as a decimal number (range 01 to 31). @@ -2244,21 +2278,21 @@ %Y The year as a decimal number including the century. - $peer_src_ip BGP or BMP peer/streaming network telemetry exporter IP address. + $peer_src_ip BGP or BMP peer/Streaming Telemetry exporter IP address. DEFAULT: none KEY: [ bgp_table_dump_output | bmp_dump_output | telemetry_dump_output ] [GLOBAL] VALUES: [ json ] -DESC: Defines output format for the dump of BGP tables/BMP events/streaming network - telemetry data. Only JSON format is currently supported and requires compiling - against Jansson library (--enable-jansson when configuring for compiling). +DESC: Defines output format for the dump of BGP tables/BMP events/Streaming Telemetry data. + Only JSON format is currently supported and requires compiling against Jansson library + (--enable-jansson when configuring for compiling). DEFAULT: json KEY: [ bgp_table_dump_refresh_time | bmp_dump_refresh_time | telemetry_dump_latest_file ] [GLOBAL] VALUES: [ 60 .. 86400 ] DESC: Time interval, in seconds, between two consecutive executions of the dump of BGP - tables/BMP events/streaming network telemetry data to files. + tables/BMP events/Streaming Telemetry data to files. DEFAULT: 0 KEY: [ bgp_table_dump_latest_file | bmp_dump_latest_file | telemetry_dump_refresh_time ] @@ -2318,9 +2352,10 @@ KEY: geoipv2_file [GLOBAL] DESC: If pmacct is compiled with --enable-geoipv2, this defines full pathname to a Maxmind GeoIP - database v2 (libmaxminddb, ie. https://dev.maxmind.com/geoip/geoip2/geolite2/ ). Only the - binary database format is supported (ie. it is not possible to load distinct CSVs for IPv4 - and IPv6 addresses). The use of --enable-geoip is mutually exclusive with --enable-geoipv2. + database v2 (libmaxminddb, ie. 
https://dev.maxmind.com/geoip/geoip2/geolite2/ ). It does + allow to resolve GeoIP-related primitives like countries and pocodes. Only the binary + database format is supported (ie. it is not possible to load distinct CSVs for IPv4 and + IPv6 addresses). The use of --enable-geoip is mutually exclusive with --enable-geoipv2. Files can be reloaded at runtime by sending the daemon a SIGUSR signal (ie. "killall -USR2 nfacctd"). @@ -2341,7 +2376,8 @@ DEFAULT: 1 KEY: tunnel_0 [GLOBAL, NO_NFACCTD, NO_SFACCTD] -DESC: Defines tunnel inspection, disabled by default. The daemon will then account on tunnelled +DESC: Defines tunnel inspection in pmacctd and uacctd, disabled by default (note: this feature + is currently unrelated to tunnel_* primitives). The daemon will then account on tunnelled data rather than on the envelope. The implementation approach is stateless, ie. control messages are not handled. Up to 4 tunnel layers are supported (ie. , ; , ; ...). Up to 8 tunnel stacks will be supported (ie. configuration @@ -2352,17 +2388,12 @@ tunnel_0: gtp, DEFAULT: none -KEY: tee_receiver -DESC: Defines remote IP address and port to which NetFlow/sFlow dagagrams are to be replicated - to. The value is expected to be in the usual form 'address:port'. Either tee_receiver key - (legacy) or tee_receivers is mandatory for a 'tee' plugin instance. -DEFAULT: none - KEY: tee_receivers [MAP] DESC: Defines full pathname to a list of remote IP addresses and ports to which NetFlow/sFlow dagagrams are to be replicated to. Examples are available in "examples/tee_receivers.lst. - example" file. Either tee_receiver key (legacy) or tee_receivers is mandatory for a 'tee' - plugin instance. + example" file. Number of map entries (by default 384) can be modified via maps_entries. + Content can be reloaded at runtime by sending the daemon a SIGUSR2 signal (ie. "killall + -USR2 nfacctd"). DEFAULT: none KEY: tee_source_ip @@ -2390,6 +2421,13 @@ allocated and cannot be changed at runtime. DEFAULT: 32 +KEY: tee_dissect_send_full_pkt +VALUES: [ true | false ] +DESC: When replicating and dissecting flow samples, send onto the tee plugin also the full + packet. This is useful in scenarios where, say, dissected flows are tagged while the + full packet is left untagged. By default this is left to false for security reasons. +DEFAULT: false + KEY: pkt_len_distrib_bins DESC: Defines a list of packet length distributions, comma-separated, which is then used to populate values for the 'pkt_len_ditrib' aggregation primitive. Values can be ranges or @@ -2398,14 +2436,6 @@ than a single bin the latest definition wins. DEFAULT: none -KEY: tmp_net_own_field -VALUES: [ true | false ] -DESC: Writes IP prefixes, src_net and dst_net primitives, to a own/distinct field than the - one used for IP addresses, src_host and dst_host primitives. This config directive is - meant for pmacct 1.6 only in order to ease backward compatibility by setting the value - of this directive to false. In the next major release this directive will be removed. -DEFAULT: true - KEY: tmp_asa_bi_flow VALUES: [ true | false ] DESC: Bi-flows use two counters to report counters, ie. bytes and packets, in forward and @@ -2414,13 +2444,6 @@ #232 and has been tested against a Cisco ASA export. DEFAULT: false -KEY: tmp_comms_same_field -VALUES: [ true | false ] -DESC: Writes BGP extended communities to a own/distinct field than the one used for BGP - standard communities. This config directive is meant for pmacct 1.6 only in order to - ease backward compatibility. 
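For the replication directives above, a minimal hypothetical tee plugin sketch could be (the
receivers list path is a placeholder; see examples/tee_receivers.lst.example for its format):

  ! illustrative sketch: replicate to the receivers listed in the map and, when
  ! dissecting flow samples, also forward the original full packet
  plugins: tee
  tee_receivers: /path/to/tee_receivers.lst
  tee_dissect_send_full_pkt: true
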
In the next major release this directive will be removed. -DEFAULT: false - KEY: thread_stack DESC: Defines the stack size for threads screated by the daemon. The value is expected in bytes. A value of 0, default, leaves the stack size to the system default or pmacct @@ -2430,7 +2453,7 @@ KEY: telemetry_daemon [GLOBAL] VALUES: [ true | false ] -DESC: Enables the streaming telemetry thread in all daemons except pmtelemetryd (which does +DESC: Enables the Streaming Telemetry thread in all daemons except pmtelemetryd (which does collect telemetry as part of its core functionalities). Quoting Cisco IOS-XR Telemetry Configuration Guide at the time of this writing: "Streaming telemetry lets users direct data to a configured receiver. This data can be used for analysis and troubleshooting @@ -2442,29 +2465,29 @@ DEFAULT: false KEY: telemetry_daemon_port_tcp [GLOBAL] -DESC: Makes the streaming network telemetry daemon, pmtelemetryd, or the streaming network - telemetry thread listen on the specified TCP port. +DESC: Makes the Streaming Telemetry daemon, pmtelemetryd, or the Streaming Telemetry thread + listen on the specified TCP port. DEFAULT: none KEY: telemetry_daemon_port_udp [GLOBAL] -DESC: Makes the streaming network telemetry daemon, pmtelemetryd, or the streaming network - telemetry thread listen on the specified UDP port. +DESC: Makes the Streaming Telemetry daemon, pmtelemetryd, or the Streaming Telemetry thread + listen on the specified UDP port. DEFAULT: none KEY: telemetry_daemon_ip [GLOBAL] -DESC: Binds the streaming network telemetry daemon to a specific interface. Expects as value - an IPv4/IPv6 address. +DESC: Binds the Streaming Telemetry daemon to a specific interface. Expects as value an IPv4/ + IPv6 address. DEFAULT: 0.0.0.0 KEY: telemetry_daemon_decoder [GLOBAL] -VALUES: [ json | zjson | cisco_json | cisco_zjson ] -DESC: Sets the streaming network telemetry data decoder to the specified type. Cisco versions - of json and zjson prepend a 12 bytes proprietary header. +VALUES: [ json | zjson | cisco | cisco_json | cisco_zjson | cisco_gpb | cisco_gpb_kv ] +DESC: Sets the Streaming Telemetry data decoder to the specified type. Cisco versions of json, + gpb, etc. all prepend a 12 bytes proprietary header. DEFAULT: none KEY: telemetry_daemon_max_peers [GLOBAL] -DESC: Sets the maximum number of exporters the streaming network telemetry daemon can receive - data from. Upon reaching of such limit, no more exporters can send data to the daemon. +DESC: Sets the maximum number of exporters the Streaming Telemetry daemon can receive data from. + Upon reaching of such limit, no more exporters can send data to the daemon. DEFAULT: 100 KEY: telemetry_daemon_udp_timeout [GLOBAL] @@ -2479,11 +2502,53 @@ DEFAULT: none (ie. allow all) KEY: telemetry_daemon_pipe_size [GLOBAL] -DESC: Defines the size of the kernel socket used for streaming network telemetry datagrams - (see also bgp_daemon_pipe_size for more info). +DESC: Defines the size of the kernel socket used for Streaming Telemetry datagrams (see also + bgp_daemon_pipe_size for more info). DEFAULT: Operating System default KEY: telemetry_daemon_ipprec [GLOBAL] -DESC: Marks self-originated streaming network telemetry messages with the supplied IP - precedence value. Applies to TCP sessions only. +DESC: Marks self-originated Streaming Telemetry messages with the supplied IP precedence value. + Applies to TCP sessions only. 
DEFAULT: 0 + +KEY: classifier_num_roots [GLOBAL] +DESC: Defines the number of buckets of the nDPI memory structure on which to hash flows. + The more the buckets, the more memory will be allocated at startup and the smaller + - and hence more performing - each memory structure will be. +DEFAULT: 512 + +KEY: classifier_max_flows [GLOBAL] +DESC: Maximum number of concurrent flows allowed in the nDPI memory structure. +DEFAULT: 200000000 + +KEY: classifier_proto_guess [GLOBAL] +VALUES: [ true | false ] +DESC: If DPI classification is unsuccessful, and before giving up, try guessing the protocol + given collected flow characteristics, ie. IP protocol, port numbers, etc. +DEFAULT: false + +KEY: classifier_idle_scan_period [GLOBAL] +DESC: Defines the time interval, in seconds, at which going through the memory structure to + find for idle flows to expire. +DEFAULT: 10 + +KEY: classifier_idle_scan_budget [GLOBAL] +DESC: Defines the amount of idle flows to expire per each classifier_idle_scan_period. This + feature is to prevent too many flows to expire can disrupt the regular classification + activity. +DEFAULT: 1024 + +KEY: classifier_giveup_proto_tcp [GLOBAL] +DESC: Defines the maximum amount of packets to try to classify a TCP flow. After such amount + of trials, the flow will be marked as given up and no classification attempts will be + made anymore, until it expires. +DEFAULT: 10 + +KEY: classifier_giveup_proto_udp [GLOBAL] +DESC: Same as classifier_giveup_proto_tcp but for UDP flows. +DEFAULT: 8 + +KEY: classifier_giveup_proto_other [GLOBAL] +DESC: Same as classifier_giveup_proto_tcp but for flows which IP protocol is different than + TCP and UDP. +DEFAULT: 8 diff -Nru pmacct-1.6.1/configure pmacct-1.7.0/configure --- pmacct-1.6.1/configure 2016-10-31 18:59:35.000000000 +0000 +++ pmacct-1.7.0/configure 2017-10-20 20:20:00.000000000 +0000 @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for pmacct 1.6.1. +# Generated by GNU Autoconf 2.69 for pmacct 1.7.0. # # Report bugs to . # @@ -590,8 +590,8 @@ # Identity of this package. 
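The classifier_* knobs documented above can be combined with the 'class' aggregation
primitive; a hypothetical pmacctd sketch (values illustrative, nDPI support compiled in)
might be:

  ! illustrative sketch: nDPI classification with protocol guessing as fallback
  plugins: print
  aggregate: class, src_host, dst_host
  classifier_proto_guess: true
  classifier_giveup_proto_tcp: 10
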
PACKAGE_NAME='pmacct' PACKAGE_TARNAME='pmacct' -PACKAGE_VERSION='1.6.1' -PACKAGE_STRING='pmacct 1.6.1' +PACKAGE_VERSION='1.7.0' +PACKAGE_STRING='pmacct 1.7.0' PACKAGE_BUGREPORT='paolo@pmacct.net' PACKAGE_URL='' @@ -636,16 +636,28 @@ am__EXEEXT_TRUE LTLIBOBJS LIBOBJS -WITH_AVRO_FALSE -WITH_AVRO_TRUE +USING_ST_BINS_FALSE +USING_ST_BINS_TRUE +USING_BMP_BINS_FALSE +USING_BMP_BINS_TRUE +USING_BGP_BINS_FALSE +USING_BGP_BINS_TRUE +USING_TRAFFIC_BINS_FALSE +USING_TRAFFIC_BINS_TRUE WITH_NFLOG_FALSE WITH_NFLOG_TRUE +WITH_NDPI_FALSE +WITH_NDPI_TRUE +WITH_AVRO_FALSE +WITH_AVRO_TRUE USING_THREADPOOL_FALSE USING_THREADPOOL_TRUE USING_SQL_FALSE USING_SQL_TRUE WITH_KAFKA_FALSE WITH_KAFKA_TRUE +WITH_ZMQ_FALSE +WITH_ZMQ_TRUE WITH_RABBITMQ_FALSE WITH_RABBITMQ_TRUE WITH_SQLITE3_FALSE @@ -660,6 +672,9 @@ EXTRABIN NFLOG_LIBS NFLOG_CFLAGS +NDPI_LIBS_STATIC +NDPI_LIBS +NDPI_CFLAGS AVRO_LIBS AVRO_CFLAGS JANSSON_LIBS @@ -670,6 +685,8 @@ GEOIP_CFLAGS KAFKA_LIBS KAFKA_CFLAGS +ZMQ_LIBS +ZMQ_CFLAGS RABBITMQ_LIBS RABBITMQ_CFLAGS SQLITE3_LIBS @@ -680,6 +697,7 @@ PGSQL_CFLAGS MYSQL_LIBS MYSQL_CFLAGS +MYSQL_VERSION MYSQL_CONFIG MAKE PKG_CONFIG_LIBDIR @@ -822,14 +840,21 @@ enable_mongodb enable_sqlite3 enable_rabbitmq +enable_zmq enable_kafka enable_geoip enable_geoipv2 enable_jansson enable_avro +with_ndpi_static_lib +enable_ndpi enable_64bit enable_threads enable_nflog +enable_traffic_bins +enable_bgp_bins +enable_bmp_bins +enable_st_bins ' ac_precious_vars='build_alias host_alias @@ -851,6 +876,8 @@ SQLITE3_LIBS RABBITMQ_CFLAGS RABBITMQ_LIBS +ZMQ_CFLAGS +ZMQ_LIBS KAFKA_CFLAGS KAFKA_LIBS GEOIP_CFLAGS @@ -861,6 +888,8 @@ JANSSON_LIBS AVRO_CFLAGS AVRO_LIBS +NDPI_CFLAGS +NDPI_LIBS NFLOG_CFLAGS NFLOG_LIBS' @@ -1403,7 +1432,7 @@ # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures pmacct 1.6.1 to adapt to many kinds of systems. +\`configure' configures pmacct 1.7.0 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... 
@@ -1473,7 +1502,7 @@ if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of pmacct 1.6.1:";; + short | recursive ) echo "Configuration of pmacct 1.7.0:";; esac cat <<\_ACEOF @@ -1494,21 +1523,27 @@ --enable-relax Relax compiler optimization (default: no) --disable-so Disable linking against shared objects (default: no) --enable-l2 Enable Layer-2 features and support (default: yes) - --enable-ipv6 Enable IPv6 code (default: no) + --enable-ipv6 Enable IPv6 code (default: yes) --enable-plabel Enable IP prefix labels (default: no) --enable-mysql Enable MySQL support (default: no) --enable-pgsql Enable PostgreSQL support (default: no) --enable-mongodb Enable MongoDB support (default: no) --enable-sqlite3 Enable SQLite3 support (default: no) --enable-rabbitmq Enable RabbitMQ/AMQP support (default: no) + --enable-zmq Enable ZMQ/AMQP support (default: no) --enable-kafka Enable Kafka support (default: no) --enable-geoip Enable GeoIP support (default: no) --enable-geoipv2 Enable GeoIPv2 (libmaxminddb) support (default: no) --enable-jansson Enable Jansson support (default: no) - --enable-avro Enable avro support (default: no) + --enable-avro Enable Apache Avro support (default: no) + --enable-ndpi Enable nDPI support (default: no) --enable-64bit Enable 64bit counters (default: yes) --enable-threads Enable multi-threading in pmacct (default: yes) --enable-nflog Enable NFLOG support (default: no) + --enable-traffic-bins Link IPv4/IPv6 traffic accounting binaries (default: yes) + --enable-bgp-bins Link BGP daemon binaries (default: yes) + --enable-bmp-bins Link BMP daemon binaries (default: yes) + --enable-st-bins Link Streaming Telemetry daemon binaries (default: yes) Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] @@ -1518,8 +1553,9 @@ --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-sysroot=DIR Search for dependent libraries within DIR (or the compiler's sysroot if not specified). - --with-pcap-includes=DIR Search the specified directories for header files - --with-pcap-libs=DIR Search the specified directories for libraries + --with-pcap-includes=DIR Search the specified directory for header files + --with-pcap-libs=DIR Search the specified directory for pcap library + --with-ndpi-static-lib=DIR Search the specified directory for nDPI static library Some influential environment variables: CC C compiler command @@ -1541,6 +1577,8 @@ SQLITE3_LIBS linker flags for SQLITE3, overriding pkg-config RABBITMQ_CFLAGS C compiler flags for RABBITMQ, overriding pkg-config RABBITMQ_LIBS linker flags for RABBITMQ, overriding pkg-config + ZMQ_CFLAGS C compiler flags for ZMQ, overriding pkg-config + ZMQ_LIBS linker flags for ZMQ, overriding pkg-config KAFKA_CFLAGS C compiler flags for KAFKA, overriding pkg-config KAFKA_LIBS linker flags for KAFKA, overriding pkg-config GEOIP_CFLAGS C compiler flags for GEOIP, overriding pkg-config @@ -1553,6 +1591,8 @@ AVRO_LIBS linker flags for AVRO, overriding pkg-config NFLOG_CFLAGS C compiler flags for NFLOG, overriding pkg-config NFLOG_LIBS linker flags for NFLOG, overriding pkg-config + NDPI_CFLAGS C compiler flags for dynamic nDPI, overriding pkg-config + NDPI_LIBS linker flags for dynamic nDPI, overriding pkg-config Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. 
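As a worked example of the new configure switches and override variables listed above, a
hypothetical build enabling ZeroMQ, Kafka, JSON and nDPI support might be invoked as follows
(the include/lib paths are placeholders for a non-standard nDPI installation):

  # illustrative: enable ZeroMQ queueing, Kafka/JSON output and nDPI classification
  ./configure --enable-zmq --enable-kafka --enable-jansson --enable-ndpi \
      NDPI_CFLAGS="-I/usr/local/include" NDPI_LIBS="-L/usr/local/lib -lndpi"
  make && make install
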
@@ -1620,7 +1660,7 @@ test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -pmacct configure 1.6.1 +pmacct configure 1.7.0 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2043,7 +2083,7 @@ This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by pmacct $as_me 1.6.1, which was +It was created by pmacct $as_me 1.7.0, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2858,7 +2898,7 @@ # Define the identity of the package. PACKAGE='pmacct' - VERSION='1.6.1' + VERSION='1.7.0' cat >>confdefs.h <<_ACEOF @@ -11504,10 +11544,6 @@ COMPILE_ARGS="${ac_configure_args}" -cat >>confdefs.h <<_ACEOF -#define COMPILE_ARGS "$COMPILE_ARGS" -_ACEOF - ac_ext=c ac_cpp='$CPP $CPPFLAGS' @@ -12300,9 +12336,7 @@ fi fi -host_os=`uname` -host_cpu=`uname -m` -host_os1=`uname -rs` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking OS" >&5 $as_echo_n "checking OS... " >&6; } @@ -12500,6 +12534,10 @@ $as_echo "#define BSD 1" >>confdefs.h ;; + linux*) + $as_echo "#define LINUX 1" >>confdefs.h + + ;; esac case "$host_cpu" in @@ -12847,6 +12885,7 @@ $as_echo "yes" >&6; } $as_echo "#define HAVE_L2 1" >>confdefs.h + COMPILE_ARGS="${COMPILE_ARGS} '--enable-l2'" fi @@ -12901,9 +12940,42 @@ else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - ipv6support="no" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + for ac_func in inet_pton +do : + ac_fn_c_check_func "$LINENO" "inet_pton" "ac_cv_func_inet_pton" +if test "x$ac_cv_func_inet_pton" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_INET_PTON 1 +_ACEOF + +fi +done + + if test x"$ac_cv_func_inet_pton" = x"no"; then + as_fn_error $? "ERROR: missing inet_pton(); disable IPv6 hooks !" "$LINENO" 5 + fi + + for ac_func in inet_ntop +do : + ac_fn_c_check_func "$LINENO" "inet_ntop" "ac_cv_func_inet_ntop" +if test "x$ac_cv_func_inet_ntop" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_INET_NTOP 1 +_ACEOF + +fi +done + + if test x"$ac_cv_func_inet_ntop" = x"no"; then + as_fn_error $? "ERROR: missing inet_ntop(); disable IPv6 hooks !" "$LINENO" 5 + fi + + $as_echo "#define ENABLE_IPV6 1" >>confdefs.h + + ipv6support="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-ipv6'" fi @@ -13032,6 +13104,8 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PFRING_LIB_FOUND=1 + $as_echo "#define PFRING_LIB_FOUND 1" >>confdefs.h + else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } @@ -13058,6 +13132,8 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } PFRING_LIB_FOUND=1 + $as_echo "#define PFRING_LIB_FOUND 1" >>confdefs.h + else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } @@ -13120,9 +13196,9 @@ fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pcap_setnonblock in -lpcap" >&5 -$as_echo_n "checking for pcap_setnonblock in -lpcap... " >&6; } -if ${ac_cv_lib_pcap_pcap_setnonblock+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pcap_set_protocol in -lpcap" >&5 +$as_echo_n "checking for pcap_set_protocol in -lpcap... 
" >&6; } +if ${ac_cv_lib_pcap_pcap_set_protocol+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS @@ -13136,28 +13212,28 @@ #ifdef __cplusplus extern "C" #endif -char pcap_setnonblock (); +char pcap_set_protocol (); int main () { -return pcap_setnonblock (); +return pcap_set_protocol (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_pcap_pcap_setnonblock=yes + ac_cv_lib_pcap_pcap_set_protocol=yes else - ac_cv_lib_pcap_pcap_setnonblock=no + ac_cv_lib_pcap_pcap_set_protocol=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pcap_pcap_setnonblock" >&5 -$as_echo "$ac_cv_lib_pcap_pcap_setnonblock" >&6; } -if test "x$ac_cv_lib_pcap_pcap_setnonblock" = xyes; then : - $as_echo "#define PCAP_7 1" >>confdefs.h +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pcap_pcap_set_protocol" >&5 +$as_echo "$ac_cv_lib_pcap_pcap_set_protocol" >&6; } +if test "x$ac_cv_lib_pcap_pcap_set_protocol" = xyes; then : + $as_echo "#define PCAP_SET_PROTOCOL 1" >>confdefs.h fi @@ -13349,6 +13425,57 @@ fi + if test "$MYSQL_CONFIG" != "no"; then + MYSQL_VERSION=`$MYSQL_CONFIG --version` + found_mysql="yes" + else + found_mysql="no" + fi + + + mysql_version_req=5.6.3 + + if test "$found_mysql" = "yes" -a -n "$mysql_version_req"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if MySQL version is >= $mysql_version_req" >&5 +$as_echo_n "checking if MySQL version is >= $mysql_version_req... " >&6; } + + mysql_version_req_major=`expr $mysql_version_req : '\([0-9]*\)'` + mysql_version_req_minor=`expr $mysql_version_req : '[0-9]*\.\([0-9]*\)'` + mysql_version_req_micro=`expr $mysql_version_req : '[0-9]*\.[0-9]*\.\([0-9]*\)'` + if test "x$mysql_version_req_micro" = "x"; then + mysql_version_req_micro="0" + fi + + mysql_version_req_number=`expr $mysql_version_req_major \* 1000000 \ + \+ $mysql_version_req_minor \* 1000 \ + \+ $mysql_version_req_micro` + + mysql_version_major=`expr $MYSQL_VERSION : '\([0-9]*\)'` + mysql_version_minor=`expr $MYSQL_VERSION : '[0-9]*\.\([0-9]*\)'` + mysql_version_micro=`expr $MYSQL_VERSION : '[0-9]*\.[0-9]*\.\([0-9]*\)'` + if test "x$mysql_version_micro" = "x"; then + mysql_version_micro="0" + fi + + mysql_version_number=`expr $mysql_version_major \* 1000000 \ + \+ $mysql_version_minor \* 1000 \ + \+ $mysql_version_micro` + + mysql_version_check=`expr $mysql_version_number \>\= $mysql_version_req_number` + + if test "$mysql_version_check" = "1"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + fi + + + + + PLUGINS="${PLUGINS} mysql" @@ -14116,12 +14243,12 @@ pkg_cv_RABBITMQ_CFLAGS="$RABBITMQ_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"librabbitmq\""; } >&5 - ($PKG_CONFIG --exists --print-errors "librabbitmq") 2>&5 + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"librabbitmq >= 0.8.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "librabbitmq >= 0.8.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_RABBITMQ_CFLAGS=`$PKG_CONFIG --cflags "librabbitmq" 2>/dev/null` + pkg_cv_RABBITMQ_CFLAGS=`$PKG_CONFIG --cflags "librabbitmq >= 0.8.0" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes @@ -14133,12 +14260,12 @@ pkg_cv_RABBITMQ_LIBS="$RABBITMQ_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"librabbitmq\""; } >&5 - ($PKG_CONFIG --exists --print-errors "librabbitmq") 2>&5 + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"librabbitmq >= 0.8.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "librabbitmq >= 0.8.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_RABBITMQ_LIBS=`$PKG_CONFIG --libs "librabbitmq" 2>/dev/null` + pkg_cv_RABBITMQ_LIBS=`$PKG_CONFIG --libs "librabbitmq >= 0.8.0" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes @@ -14159,14 +14286,14 @@ _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then - RABBITMQ_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "librabbitmq" 2>&1` + RABBITMQ_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "librabbitmq >= 0.8.0" 2>&1` else - RABBITMQ_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "librabbitmq" 2>&1` + RABBITMQ_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "librabbitmq >= 0.8.0" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$RABBITMQ_PKG_ERRORS" >&5 - as_fn_error $? "Package requirements (librabbitmq) were not met: + as_fn_error $? "Package requirements (librabbitmq >= 0.8.0) were not met: $RABBITMQ_PKG_ERRORS @@ -14218,6 +14345,124 @@ fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable ZMQ/AMQP support" >&5 +$as_echo_n "checking whether to enable ZMQ/AMQP support... " >&6; } +# Check whether --enable-zmq was given. +if test "${enable_zmq+set}" = set; then : + enableval=$enable_zmq; case "$enableval" in + yes) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ZMQ" >&5 +$as_echo_n "checking for ZMQ... " >&6; } + +if test -n "$ZMQ_CFLAGS"; then + pkg_cv_ZMQ_CFLAGS="$ZMQ_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libzmq >= 4.2.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libzmq >= 4.2.0") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_ZMQ_CFLAGS=`$PKG_CONFIG --cflags "libzmq >= 4.2.0" 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$ZMQ_LIBS"; then + pkg_cv_ZMQ_LIBS="$ZMQ_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libzmq >= 4.2.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libzmq >= 4.2.0") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_ZMQ_LIBS=`$PKG_CONFIG --libs "libzmq >= 4.2.0" 2>/dev/null` + test "x$?" 
!= "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + ZMQ_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libzmq >= 4.2.0" 2>&1` + else + ZMQ_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libzmq >= 4.2.0" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$ZMQ_PKG_ERRORS" >&5 + + as_fn_error $? "Package requirements (libzmq >= 4.2.0) were not met: + +$ZMQ_PKG_ERRORS + +Consider adjusting the PKG_CONFIG_PATH environment variable if you +installed software in a non-standard prefix. + +Alternatively, you may set the environment variables ZMQ_CFLAGS +and ZMQ_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details." "$LINENO" 5 +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it +is in your PATH or set the PKG_CONFIG environment variable to the full +path to pkg-config. + +Alternatively, you may set the environment variables ZMQ_CFLAGS +and ZMQ_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details. + +To get pkg-config, see . +See \`config.log' for more details" "$LINENO" 5; } +else + ZMQ_CFLAGS=$pkg_cv_ZMQ_CFLAGS + ZMQ_LIBS=$pkg_cv_ZMQ_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +fi + SUPPORTS="${SUPPORTS} zmq" + USING_ZMQ="yes" + PMACCT_CFLAGS="$PMACCT_CFLAGS $ZMQ_CFLAGS" + $as_echo "#define WITH_ZMQ 1" >>confdefs.h + + ;; + no) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + ;; + esac +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + +fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable Kafka support" >&5 $as_echo_n "checking whether to enable Kafka support... " >&6; } @@ -14236,12 +14481,12 @@ pkg_cv_KAFKA_CFLAGS="$KAFKA_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"rdkafka >= 0.8.5\""; } >&5 - ($PKG_CONFIG --exists --print-errors "rdkafka >= 0.8.5") 2>&5 + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"rdkafka >= 0.9.2\""; } >&5 + ($PKG_CONFIG --exists --print-errors "rdkafka >= 0.9.2") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_KAFKA_CFLAGS=`$PKG_CONFIG --cflags "rdkafka >= 0.8.5" 2>/dev/null` + pkg_cv_KAFKA_CFLAGS=`$PKG_CONFIG --cflags "rdkafka >= 0.9.2" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes @@ -14253,12 +14498,12 @@ pkg_cv_KAFKA_LIBS="$KAFKA_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"rdkafka >= 0.8.5\""; } >&5 - ($PKG_CONFIG --exists --print-errors "rdkafka >= 0.8.5") 2>&5 + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"rdkafka >= 0.9.2\""; } >&5 + ($PKG_CONFIG --exists --print-errors "rdkafka >= 0.9.2") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_KAFKA_LIBS=`$PKG_CONFIG --libs "rdkafka >= 0.8.5" 2>/dev/null` + pkg_cv_KAFKA_LIBS=`$PKG_CONFIG --libs "rdkafka >= 0.9.2" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes @@ -14279,9 +14524,9 @@ _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then - KAFKA_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "rdkafka >= 0.8.5" 2>&1` + KAFKA_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "rdkafka >= 0.9.2" 2>&1` else - KAFKA_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "rdkafka >= 0.8.5" 2>&1` + KAFKA_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "rdkafka >= 0.9.2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$KAFKA_PKG_ERRORS" >&5 @@ -14611,7 +14856,7 @@ $as_echo "yes" >&6; } fi - PLUGINS="${PLUGINS} geoip" + SUPPORTS="${SUPPORTS} geoip" USING_MMGEOIP="yes" PMACCT_CFLAGS="$PMACCT_CFLAGS $GEOIP_CFLAGS" $as_echo "#define WITH_GEOIP 1" >>confdefs.h @@ -14648,12 +14893,12 @@ pkg_cv_GEOIPV2_CFLAGS="$GEOIPV2_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmaxminddb >= 1.0.0\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libmaxminddb >= 1.0.0") 2>&5 + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmaxminddb >= 1.2.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libmaxminddb >= 1.2.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_GEOIPV2_CFLAGS=`$PKG_CONFIG --cflags "libmaxminddb >= 1.0.0" 2>/dev/null` + pkg_cv_GEOIPV2_CFLAGS=`$PKG_CONFIG --cflags "libmaxminddb >= 1.2.0" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes @@ -14665,12 +14910,12 @@ pkg_cv_GEOIPV2_LIBS="$GEOIPV2_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmaxminddb >= 1.0.0\""; } >&5 - ($PKG_CONFIG --exists --print-errors "libmaxminddb >= 1.0.0") 2>&5 + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmaxminddb >= 1.2.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libmaxminddb >= 1.2.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_GEOIPV2_LIBS=`$PKG_CONFIG --libs "libmaxminddb >= 1.0.0" 2>/dev/null` + pkg_cv_GEOIPV2_LIBS=`$PKG_CONFIG --libs "libmaxminddb >= 1.2.0" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes @@ -14691,9 +14936,9 @@ _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then - GEOIPV2_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libmaxminddb >= 1.0.0" 2>&1` + GEOIPV2_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libmaxminddb >= 1.2.0" 2>&1` else - GEOIPV2_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libmaxminddb >= 1.0.0" 2>&1` + GEOIPV2_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libmaxminddb >= 1.2.0" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$GEOIPV2_PKG_ERRORS" >&5 @@ -14904,7 +15149,7 @@ $as_echo "yes" >&6; } fi - PLUGINS="${PLUGINS} geoipv2" + SUPPORTS="${SUPPORTS} geoipv2" USING_MMGEOIPV2="yes" PMACCT_CFLAGS="$PMACCT_CFLAGS $GEOIPV2_CFLAGS" $as_echo "#define WITH_GEOIPV2 1" >>confdefs.h @@ -14941,12 +15186,12 @@ pkg_cv_JANSSON_CFLAGS="$JANSSON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"jansson >= 2.2\""; } >&5 - ($PKG_CONFIG --exists --print-errors "jansson >= 2.2") 2>&5 + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"jansson >= 2.5\""; } >&5 + ($PKG_CONFIG --exists --print-errors "jansson >= 2.5") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_JANSSON_CFLAGS=`$PKG_CONFIG --cflags "jansson >= 2.2" 2>/dev/null` + pkg_cv_JANSSON_CFLAGS=`$PKG_CONFIG --cflags "jansson >= 2.5" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes @@ -14958,12 +15203,12 @@ pkg_cv_JANSSON_LIBS="$JANSSON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ - { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"jansson >= 2.2\""; } >&5 - ($PKG_CONFIG --exists --print-errors "jansson >= 2.2") 2>&5 + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"jansson >= 2.5\""; } >&5 + ($PKG_CONFIG --exists --print-errors "jansson >= 2.5") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then - pkg_cv_JANSSON_LIBS=`$PKG_CONFIG --libs "jansson >= 2.2" 2>/dev/null` + pkg_cv_JANSSON_LIBS=`$PKG_CONFIG --libs "jansson >= 2.5" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes @@ -14984,14 +15229,14 @@ _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then - JANSSON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "jansson >= 2.2" 2>&1` + JANSSON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "jansson >= 2.5" 2>&1` else - JANSSON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "jansson >= 2.2" 2>&1` + JANSSON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "jansson >= 2.5" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$JANSSON_PKG_ERRORS" >&5 - as_fn_error $? "Package requirements (jansson >= 2.2) were not met: + as_fn_error $? 
"Package requirements (jansson >= 2.5) were not met: $JANSSON_PKG_ERRORS @@ -15023,7 +15268,7 @@ $as_echo "yes" >&6; } fi - PLUGINS="${PLUGINS} jansson" + SUPPORTS="${SUPPORTS} jansson" USING_JANSSON="yes" PMACCT_CFLAGS="$PMACCT_CFLAGS $JANSSON_CFLAGS" $as_echo "#define WITH_JANSSON 1" >>confdefs.h @@ -15201,6 +15446,7 @@ $as_echo "yes" >&6; } fi + SUPPORTS="${SUPPORTS} avro" USING_AVRO="yes" PMACCT_CFLAGS="$PMACCT_CFLAGS $AVRO_CFLAGS" $as_echo "#define WITH_AVRO 1" >>confdefs.h @@ -15268,6 +15514,224 @@ fi + +# Check whether --with-ndpi-static-lib was given. +if test "${with_ndpi_static_lib+set}" = set; then : + withval=$with_ndpi_static_lib; + + absdir=`cd $withval 2>/dev/null && pwd` + if test x$absdir != x ; then + withval=$absdir + fi + + NDPI_CUST_STATIC_LIB=$withval + +fi + + +if test x"$NDPI_CUST_STATIC_LIB" != x""; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking your own nDPI library" >&5 +$as_echo_n "checking your own nDPI library... " >&6; } + if test -r $NDPI_CUST_STATIC_LIB/libndpi.a; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } + NDPI_CUST_STATIC_LIB_FOUND="yes" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "ERROR: unable to find nDPI library in $NDPI_CUST_STATIC_LIB" "$LINENO" 5 + fi +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable nDPI support" >&5 +$as_echo_n "checking whether to enable nDPI support... " >&6; } +# Check whether --enable-ndpi was given. +if test "${enable_ndpi+set}" = set; then : + enableval=$enable_ndpi; case "$enableval" in + yes) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for NDPI" >&5 +$as_echo_n "checking for NDPI... " >&6; } + +if test -n "$NDPI_CFLAGS"; then + pkg_cv_NDPI_CFLAGS="$NDPI_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libndpi >= 2.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libndpi >= 2.0") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_NDPI_CFLAGS=`$PKG_CONFIG --cflags "libndpi >= 2.0" 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$NDPI_LIBS"; then + pkg_cv_NDPI_LIBS="$NDPI_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libndpi >= 2.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libndpi >= 2.0") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_NDPI_LIBS=`$PKG_CONFIG --libs "libndpi >= 2.0" 2>/dev/null` + test "x$?" 
!= "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + NDPI_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libndpi >= 2.0" 2>&1` + else + NDPI_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libndpi >= 2.0" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$NDPI_PKG_ERRORS" >&5 + + as_fn_error $? "Package requirements (libndpi >= 2.0) were not met: + +$NDPI_PKG_ERRORS + +Consider adjusting the PKG_CONFIG_PATH environment variable if you +installed software in a non-standard prefix. + +Alternatively, you may set the environment variables NDPI_CFLAGS +and NDPI_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details." "$LINENO" 5 +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it +is in your PATH or set the PKG_CONFIG environment variable to the full +path to pkg-config. + +Alternatively, you may set the environment variables NDPI_CFLAGS +and NDPI_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details. + +To get pkg-config, see . +See \`config.log' for more details" "$LINENO" 5; } +else + NDPI_CFLAGS=$pkg_cv_NDPI_CFLAGS + NDPI_LIBS=$pkg_cv_NDPI_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +fi + SUPPORTS="${SUPPORTS} ndpi" + USING_NDPI="yes" + + if test x"$NDPI_CFLAGS" != x""; then + NDPI_CFLAGS_INST=`echo $NDPI_CFLAGS | sed 's/ $//'` + NDPI_CFLAGS_INST="$NDPI_CFLAGS_INST/libndpi" + else + NDPI_CFLAGS_INST="" + fi + PMACCT_CFLAGS="$PMACCT_CFLAGS $NDPI_CFLAGS $NDPI_CFLAGS_INST" + + $as_echo "#define WITH_NDPI 1" >>confdefs.h + + _save_LIBS="$LIBS" + LIBS="$LIBS $NDPI_LIBS" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ndpi_init_detection_module in -lndpi" >&5 +$as_echo_n "checking for ndpi_init_detection_module in -lndpi... " >&6; } +if ${ac_cv_lib_ndpi_ndpi_init_detection_module+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lndpi $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char ndpi_init_detection_module (); +int +main () +{ +return ndpi_init_detection_module (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ndpi_ndpi_init_detection_module=yes +else + ac_cv_lib_ndpi_ndpi_init_detection_module=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ndpi_ndpi_init_detection_module" >&5 +$as_echo "$ac_cv_lib_ndpi_ndpi_init_detection_module" >&6; } +if test "x$ac_cv_lib_ndpi_ndpi_init_detection_module" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBNDPI 1 +_ACEOF + + LIBS="-lndpi $LIBS" + +fi + + LIBS="$_save_LIBS" + + if test x"$NDPI_CUST_STATIC_LIB_FOUND" = x"yes"; then + NDPI_LIBS_STATIC="$NDPI_CUST_STATIC_LIB/libndpi.a" + elif test -r /usr/lib/libndpi.a; then + NDPI_LIBS_STATIC="/usr/lib/libndpi.a" + elif test -r /usr/local/lib/libndpi.a; then + NDPI_LIBS_STATIC="/usr/local/lib/libndpi.a" + elif test -r /usr/local/nDPI/lib/libndpi.a; then + NDPI_LIBS_STATIC="/usr/local/nDPI/lib/libndpi.a" + else + as_fn_error $? "ERROR: missing nDPI static library" "$LINENO" 5 + fi + + + ;; + no) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + ;; + esac +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + +fi + + if test x"$USING_DLOPEN" = x"yes"; then $as_echo "#define HAVE_DLOPEN 1" >>confdefs.h @@ -15603,6 +16067,7 @@ $as_echo "yes" >&6; } $as_echo "#define HAVE_64BIT_COUNTERS 1" >>confdefs.h + COMPILE_ARGS="${COMPILE_ARGS} '--enable-64bit'" fi @@ -15653,6 +16118,7 @@ LIBS="${LIBS} -lpthread" USING_THREADPOOL=yes + COMPILE_ARGS="${COMPILE_ARGS} '--enable-threads'" fi @@ -15772,6 +16238,122 @@ fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to link IPv4/IPv6 traffic accounting accounting binaries" >&5 +$as_echo_n "checking whether to link IPv4/IPv6 traffic accounting accounting binaries... " >&6; } +# Check whether --enable-traffic-bins was given. +if test "${enable_traffic_bins+set}" = set; then : + enableval=$enable_traffic_bins; + if test x$enableval = x"yes" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + $as_echo "#define HAVE_TRAFFIC_BINS 1" >>confdefs.h + + USING_TRAFFIC_BINS="yes" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + $as_echo "#define HAVE_TRAFFIC_BINS 1" >>confdefs.h + + USING_TRAFFIC_BINS="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-traffic-bins'" + + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to link BGP daemon binaries" >&5 +$as_echo_n "checking whether to link BGP daemon binaries... " >&6; } +# Check whether --enable-bgp-bins was given. 
+if test "${enable_bgp_bins+set}" = set; then : + enableval=$enable_bgp_bins; + if test x$enableval = x"yes" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + $as_echo "#define HAVE_BGP_BINS 1" >>confdefs.h + + USING_BGP_BINS="yes" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + $as_echo "#define HAVE_BGP_BINS 1" >>confdefs.h + + USING_BGP_BINS="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-bgp-bins'" + + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to link BMP daemon binaries" >&5 +$as_echo_n "checking whether to link BMP daemon binaries... " >&6; } +# Check whether --enable-bmp-bins was given. +if test "${enable_bmp_bins+set}" = set; then : + enableval=$enable_bmp_bins; + if test x$enableval = x"yes" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + $as_echo "#define HAVE_BMP_BINS 1" >>confdefs.h + + USING_BMP_BINS="yes" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + $as_echo "#define HAVE_BMP_BINS 1" >>confdefs.h + + USING_BMP_BINS="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-bmp-bins'" + + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to link Streaming Telemetry daemon binaries" >&5 +$as_echo_n "checking whether to link Streaming Telemetry daemon binaries... " >&6; } +# Check whether --enable-st-bins was given. +if test "${enable_st_bins+set}" = set; then : + enableval=$enable_st_bins; + if test x$enableval = x"yes" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + $as_echo "#define HAVE_ST_BINS 1" >>confdefs.h + + USING_ST_BINS="yes" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + $as_echo "#define HAVE_ST_BINS 1" >>confdefs.h + + USING_ST_BINS="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-st-bins'" + + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking return type of signal handlers" >&5 $as_echo_n "checking return type of signal handlers... " >&6; } if ${ac_cv_type_signal+:} false; then : @@ -15819,6 +16401,10 @@ done +cat >>confdefs.h <<_ACEOF +#define COMPILE_ARGS "$COMPILE_ARGS" +_ACEOF + CFLAGS="${CFLAGS} ${INCLUDES}" INCLUDES="" @@ -15830,6 +16416,7 @@ LIBS ......... : ${LIBS} LDFLAGS ...... : ${LDFLAGS} PLUGINS ...... : ${PLUGINS} +SUPPORTS ..... : ${SUPPORTS} Now type 'make' to compile the source code. 
@@ -15887,6 +16474,14 @@ WITH_RABBITMQ_FALSE= fi + if test x"$USING_ZMQ" = x"yes"; then + WITH_ZMQ_TRUE= + WITH_ZMQ_FALSE='#' +else + WITH_ZMQ_TRUE='#' + WITH_ZMQ_FALSE= +fi + if test x"$USING_KAFKA" = x"yes"; then WITH_KAFKA_TRUE= WITH_KAFKA_FALSE='#' @@ -15911,6 +16506,22 @@ USING_THREADPOOL_FALSE= fi + if test x"$USING_AVRO" = x"yes"; then + WITH_AVRO_TRUE= + WITH_AVRO_FALSE='#' +else + WITH_AVRO_TRUE='#' + WITH_AVRO_FALSE= +fi + + if test x"$USING_NDPI" = x"yes"; then + WITH_NDPI_TRUE= + WITH_NDPI_FALSE='#' +else + WITH_NDPI_TRUE='#' + WITH_NDPI_FALSE= +fi + if test x"$USING_NFLOG" = x"yes"; then WITH_NFLOG_TRUE= WITH_NFLOG_FALSE='#' @@ -15919,15 +16530,39 @@ WITH_NFLOG_FALSE= fi - if test x"$USING_AVRO" = x"yes"; then - WITH_AVRO_TRUE= - WITH_AVRO_FALSE='#' + if test x"$USING_TRAFFIC_BINS" = x"yes"; then + USING_TRAFFIC_BINS_TRUE= + USING_TRAFFIC_BINS_FALSE='#' else - WITH_AVRO_TRUE='#' - WITH_AVRO_FALSE= + USING_TRAFFIC_BINS_TRUE='#' + USING_TRAFFIC_BINS_FALSE= fi -ac_config_files="$ac_config_files Makefile src/Makefile src/nfprobe_plugin/Makefile src/sfprobe_plugin/Makefile src/bgp/Makefile src/tee_plugin/Makefile src/isis/Makefile src/bmp/Makefile src/telemetry/Makefile" + if test x"$USING_BGP_BINS" = x"yes"; then + USING_BGP_BINS_TRUE= + USING_BGP_BINS_FALSE='#' +else + USING_BGP_BINS_TRUE='#' + USING_BGP_BINS_FALSE= +fi + + if test x"$USING_BMP_BINS" = x"yes"; then + USING_BMP_BINS_TRUE= + USING_BMP_BINS_FALSE='#' +else + USING_BMP_BINS_TRUE='#' + USING_BMP_BINS_FALSE= +fi + + if test x"$USING_ST_BINS" = x"yes"; then + USING_ST_BINS_TRUE= + USING_ST_BINS_FALSE='#' +else + USING_ST_BINS_TRUE='#' + USING_ST_BINS_FALSE= +fi + +ac_config_files="$ac_config_files Makefile src/Makefile src/nfprobe_plugin/Makefile src/sfprobe_plugin/Makefile src/bgp/Makefile src/tee_plugin/Makefile src/isis/Makefile src/bmp/Makefile src/telemetry/Makefile src/ndpi/Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure @@ -16114,6 +16749,10 @@ as_fn_error $? "conditional \"WITH_RABBITMQ\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi +if test -z "${WITH_ZMQ_TRUE}" && test -z "${WITH_ZMQ_FALSE}"; then + as_fn_error $? "conditional \"WITH_ZMQ\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi if test -z "${WITH_KAFKA_TRUE}" && test -z "${WITH_KAFKA_FALSE}"; then as_fn_error $? "conditional \"WITH_KAFKA\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 @@ -16126,12 +16765,32 @@ as_fn_error $? "conditional \"USING_THREADPOOL\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi +if test -z "${WITH_AVRO_TRUE}" && test -z "${WITH_AVRO_FALSE}"; then + as_fn_error $? "conditional \"WITH_AVRO\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${WITH_NDPI_TRUE}" && test -z "${WITH_NDPI_FALSE}"; then + as_fn_error $? "conditional \"WITH_NDPI\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi if test -z "${WITH_NFLOG_TRUE}" && test -z "${WITH_NFLOG_FALSE}"; then as_fn_error $? "conditional \"WITH_NFLOG\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi -if test -z "${WITH_AVRO_TRUE}" && test -z "${WITH_AVRO_FALSE}"; then - as_fn_error $? "conditional \"WITH_AVRO\" was never defined. 
+if test -z "${USING_TRAFFIC_BINS_TRUE}" && test -z "${USING_TRAFFIC_BINS_FALSE}"; then + as_fn_error $? "conditional \"USING_TRAFFIC_BINS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USING_BGP_BINS_TRUE}" && test -z "${USING_BGP_BINS_FALSE}"; then + as_fn_error $? "conditional \"USING_BGP_BINS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USING_BMP_BINS_TRUE}" && test -z "${USING_BMP_BINS_FALSE}"; then + as_fn_error $? "conditional \"USING_BMP_BINS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${USING_ST_BINS_TRUE}" && test -z "${USING_ST_BINS_FALSE}"; then + as_fn_error $? "conditional \"USING_ST_BINS\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi @@ -16531,7 +17190,7 @@ # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by pmacct $as_me 1.6.1, which was +This file was extended by pmacct $as_me 1.7.0, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -16588,7 +17247,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -pmacct config.status 1.6.1 +pmacct config.status 1.7.0 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" @@ -16994,6 +17653,7 @@ "src/isis/Makefile") CONFIG_FILES="$CONFIG_FILES src/isis/Makefile" ;; "src/bmp/Makefile") CONFIG_FILES="$CONFIG_FILES src/bmp/Makefile" ;; "src/telemetry/Makefile") CONFIG_FILES="$CONFIG_FILES src/telemetry/Makefile" ;; + "src/ndpi/Makefile") CONFIG_FILES="$CONFIG_FILES src/ndpi/Makefile" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac diff -Nru pmacct-1.6.1/configure.ac pmacct-1.7.0/configure.ac --- pmacct-1.6.1/configure.ac 2016-10-31 18:58:52.000000000 +0000 +++ pmacct-1.7.0/configure.ac 2017-10-20 20:19:38.000000000 +0000 @@ -1,7 +1,7 @@ dnl Process this file with autoconf to produce a configure script. dnl configuration file for pmacct -AC_INIT([pmacct], [1.6.1], [paolo@pmacct.net]) +AC_INIT([pmacct], [1.7.0], [paolo@pmacct.net]) AM_INIT_AUTOMAKE([foreign]) AC_CONFIG_MACRO_DIR([m4]) LT_INIT @@ -9,7 +9,6 @@ m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES(yes)]) COMPILE_ARGS="${ac_configure_args}" -AC_DEFINE_UNQUOTED(COMPILE_ARGS, "$COMPILE_ARGS") dnl Checks for programs. AC_PROG_CC @@ -19,9 +18,7 @@ export PKG_CONFIG_PATH PKG_PROG_PKG_CONFIG -host_os=`uname` -host_cpu=`uname -m` -host_os1=`uname -rs` +AC_CANONICAL_HOST AC_MSG_CHECKING(OS) AC_MSG_RESULT($host_os) @@ -117,6 +114,9 @@ *BSD) AC_DEFINE(BSD, 1) ;; + linux*) + AC_DEFINE(LINUX, 1) + ;; esac dnl cpu specific flags @@ -283,12 +283,13 @@ [ AC_MSG_RESULT(yes) AC_DEFINE(HAVE_L2, 1) + COMPILE_ARGS="${COMPILE_ARGS} '--enable-l2'" ] ) AC_MSG_CHECKING([whether to enable IPv6 code]) AC_ARG_ENABLE(ipv6, - [ --enable-ipv6 Enable IPv6 code (default: no)], + [ --enable-ipv6 Enable IPv6 code (default: yes)], [ if test x$enableval = x"yes" ; then AC_MSG_RESULT(yes) @@ -310,8 +311,20 @@ fi ], [ - AC_MSG_RESULT(no) - ipv6support="no" + AC_MSG_RESULT(yes) + AC_CHECK_FUNCS(inet_pton) + if test x"$ac_cv_func_inet_pton" = x"no"; then + AC_MSG_ERROR(ERROR: missing inet_pton(); disable IPv6 hooks !) 
+ fi + + AC_CHECK_FUNCS(inet_ntop) + if test x"$ac_cv_func_inet_ntop" = x"no"; then + AC_MSG_ERROR(ERROR: missing inet_ntop(); disable IPv6 hooks !) + fi + + AC_DEFINE(ENABLE_IPV6, 1) + ipv6support="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-ipv6'" ] ) @@ -331,7 +344,7 @@ ]) AC_ARG_WITH(pcap-includes, - [ --with-pcap-includes=DIR Search the specified directories for header files], + [ --with-pcap-includes=DIR Search the specified directory for header files], [ AC_LINEARIZE_PATH($withval, withval=$absdir) INCLUDES="${INCLUDES} -I$withval" @@ -378,7 +391,7 @@ fi AC_ARG_WITH(pcap-libs, - [ --with-pcap-libs=DIR Search the specified directories for libraries], + [ --with-pcap-libs=DIR Search the specified directory for pcap library], [ AC_LINEARIZE_PATH($withval, withval=$absdir) LIBS="${LIBS} -L$withval" @@ -396,6 +409,7 @@ LIBS="${LIBS} -lpfring -lpcap" AC_MSG_RESULT(yes) PFRING_LIB_FOUND=1 + AC_DEFINE(PFRING_LIB_FOUND, 1) else AC_MSG_RESULT(no) fi @@ -416,6 +430,7 @@ LIBS="${LIBS} -lpfring -lpcap" AC_MSG_RESULT(yes) PFRING_LIB_FOUND=1 + AC_DEFINE(PFRING_LIB_FOUND, 1) else AC_MSG_RESULT(no) fi @@ -432,7 +447,7 @@ ERROR: missing pcap library. Refer to: http://www.tcpdump.org/ ])]) - AC_CHECK_LIB([pcap], [pcap_setnonblock], [ AC_DEFINE(PCAP_7, 1) ], []) + AC_CHECK_LIB([pcap], [pcap_set_protocol], [ AC_DEFINE(PCAP_SET_PROTOCOL, 1) ], []) AC_CHECK_LIB([pcap], [bpf_filter], [ AC_DEFINE(PCAP_NOBPF, 1) ], []) else dnl Unable to test: we should check for these libs @@ -493,7 +508,7 @@ AC_MSG_RESULT(yes) dnl Unfortunately, no pkg-config support for MySQL - AC_CHECK_PROG([MYSQL_CONFIG], [mysql_config], [mysql_config], [no],,) + AC_CHECK_PROG([MYSQL_CONFIG], [mysql_config], [mysql_config], [no]) if test "x${MYSQL_CONFIG}" = "xno"; then AC_MSG_ERROR([ERROR: missing mysql_config program]) fi @@ -502,6 +517,9 @@ MYSQL_LIBS=`$MYSQL_CONFIG --libs`], [AC_MSG_ERROR([ERROR: missing MySQL client library])], [`$MYSQL_CONFIG --libs`]) + dnl version check not enforced with a AC_MSG_ERROR for now + AX_LIB_MYSQL(5.6.3) + AC_SUBST(MYSQL_CFLAGS) AC_SUBST(MYSQL_LIBS) @@ -645,6 +663,7 @@ ) dnl finish: mongodb handling +dnl start: sqlite3 handling AC_MSG_CHECKING(whether to enable SQLite3 support) AC_ARG_ENABLE(sqlite3, [ --enable-sqlite3 Enable SQLite3 support (default: no)], @@ -677,7 +696,7 @@ yes) AC_MSG_RESULT(yes) dnl reasonably old librabbitmq already support pkg-config - PKG_CHECK_MODULES([RABBITMQ], [librabbitmq]) + PKG_CHECK_MODULES([RABBITMQ], [librabbitmq >= 0.8.0]) PLUGINS="${PLUGINS} rabbitmq" USING_RABBITMQ="yes" PMACCT_CFLAGS="$PMACCT_CFLAGS $RABBITMQ_CFLAGS" @@ -693,6 +712,28 @@ ) dnl finish: RabbitMQ/AMQP handling +dnl start: ZMQ/AMQP handling +AC_MSG_CHECKING(whether to enable ZMQ/AMQP support) +AC_ARG_ENABLE(zmq, + [ --enable-zmq Enable ZMQ/AMQP support (default: no)], + [ case "$enableval" in + yes) + AC_MSG_RESULT(yes) + PKG_CHECK_MODULES([ZMQ], [libzmq >= 4.2.0]) + SUPPORTS="${SUPPORTS} zmq" + USING_ZMQ="yes" + PMACCT_CFLAGS="$PMACCT_CFLAGS $ZMQ_CFLAGS" + AC_DEFINE(WITH_ZMQ, 1) + ;; + no) + AC_MSG_RESULT(no) + ;; + esac ], + [ + AC_MSG_RESULT(no) + ] +) +dnl finish: ZMQ/AMQP handling dnl start: Kafka handling AC_MSG_CHECKING(whether to enable Kafka support) @@ -701,7 +742,7 @@ [ case "$enableval" in yes) AC_MSG_RESULT(yes) - PKG_CHECK_MODULES([KAFKA], [rdkafka >= 0.8.5],, [ + PKG_CHECK_MODULES([KAFKA], [rdkafka >= 0.9.2],, [ AC_MSG_CHECKING([default locations for librdkafka]) if test -r /usr/lib/librdkafka.a -o -r /usr/lib/librdkafka.so; then KAFKA_LIBS="-L/usr/lib -lrdkafka" @@ -761,7 +802,7 @@ 
AC_MSG_RESULT(yes) dnl reasonably old Maxmind GeoIP v1 already support pkg-config PKG_CHECK_MODULES([GEOIP], [geoip >= 1.0.0]) - PLUGINS="${PLUGINS} geoip" + SUPPORTS="${SUPPORTS} geoip" USING_MMGEOIP="yes" PMACCT_CFLAGS="$PMACCT_CFLAGS $GEOIP_CFLAGS" AC_DEFINE(WITH_GEOIP, 1) @@ -781,7 +822,7 @@ [ case "$enableval" in yes) AC_MSG_RESULT(yes) - PKG_CHECK_MODULES([GEOIPV2], [libmaxminddb >= 1.0.0],, [ + PKG_CHECK_MODULES([GEOIPV2], [libmaxminddb >= 1.2.0],, [ AC_MSG_CHECKING([default locations for libmaxminddb]) if test -r /usr/lib/libmaxminddb.a -o -r /usr/lib/libmaxminddb.so; then GEOIPV2_LIBS="-L/usr/lib -lmaxminddb" @@ -817,7 +858,7 @@ CFLAGS="$_save_CFLAGS" fi ]) - PLUGINS="${PLUGINS} geoipv2" + SUPPORTS="${SUPPORTS} geoipv2" USING_MMGEOIPV2="yes" PMACCT_CFLAGS="$PMACCT_CFLAGS $GEOIPV2_CFLAGS" AC_DEFINE(WITH_GEOIPV2, 1) @@ -840,8 +881,8 @@ yes) AC_MSG_RESULT(yes) dnl reasonably old Jansson already support pkg-config - PKG_CHECK_MODULES([JANSSON], [jansson >= 2.2]) - PLUGINS="${PLUGINS} jansson" + PKG_CHECK_MODULES([JANSSON], [jansson >= 2.5]) + SUPPORTS="${SUPPORTS} jansson" USING_JANSSON="yes" PMACCT_CFLAGS="$PMACCT_CFLAGS $JANSSON_CFLAGS" AC_DEFINE(WITH_JANSSON, 1) @@ -864,11 +905,12 @@ dnl start: Avro handling AC_MSG_CHECKING(whether to enable Avro support) AC_ARG_ENABLE(avro, - [ --enable-avro Enable avro support (default: no)], + [ --enable-avro Enable Apache Avro support (default: no)], [ case "$enableval" in yes) AC_MSG_RESULT(yes) PKG_CHECK_MODULES([AVRO], [avro-c >= 1.8]) + SUPPORTS="${SUPPORTS} avro" USING_AVRO="yes" PMACCT_CFLAGS="$PMACCT_CFLAGS $AVRO_CFLAGS" AC_DEFINE(WITH_AVRO, 1) @@ -887,6 +929,74 @@ ) dnl finish: Avro handling +dnl start: nDPI handling +AC_ARG_WITH(ndpi-static-lib, + [ --with-ndpi-static-lib=DIR Search the specified directory for nDPI static library], + [ + AC_LINEARIZE_PATH($withval, withval=$absdir) + NDPI_CUST_STATIC_LIB=$withval + ]) + +if test x"$NDPI_CUST_STATIC_LIB" != x""; then + AC_MSG_CHECKING(your own nDPI library) + if test -r $NDPI_CUST_STATIC_LIB/libndpi.a; then + AC_MSG_RESULT(ok) + NDPI_CUST_STATIC_LIB_FOUND="yes" + else + AC_MSG_RESULT(no) + AC_MSG_ERROR(ERROR: unable to find nDPI library in $NDPI_CUST_STATIC_LIB) + fi +fi + +AC_MSG_CHECKING(whether to enable nDPI support) +AC_ARG_ENABLE(ndpi, + [ --enable-ndpi Enable nDPI support (default: no)], + [ case "$enableval" in + yes) + AC_MSG_RESULT(yes) + PKG_CHECK_MODULES([NDPI], [libndpi >= 2.0]) + SUPPORTS="${SUPPORTS} ndpi" + USING_NDPI="yes" + + if test x"$NDPI_CFLAGS" != x""; then + NDPI_CFLAGS_INST=`echo $NDPI_CFLAGS | sed 's/ $//'` + NDPI_CFLAGS_INST="$NDPI_CFLAGS_INST/libndpi" + else + NDPI_CFLAGS_INST="" + fi + PMACCT_CFLAGS="$PMACCT_CFLAGS $NDPI_CFLAGS $NDPI_CFLAGS_INST" + + AC_DEFINE(WITH_NDPI, 1) + _save_LIBS="$LIBS" + LIBS="$LIBS $NDPI_LIBS" + AC_CHECK_LIB(ndpi, ndpi_init_detection_module, [], []) + LIBS="$_save_LIBS" + + dnl XXX: to be improved: avoid linking both static and dynamic libs + if test x"$NDPI_CUST_STATIC_LIB_FOUND" = x"yes"; then + NDPI_LIBS_STATIC="$NDPI_CUST_STATIC_LIB/libndpi.a" + elif test -r /usr/lib/libndpi.a; then + NDPI_LIBS_STATIC="/usr/lib/libndpi.a" + elif test -r /usr/local/lib/libndpi.a; then + NDPI_LIBS_STATIC="/usr/local/lib/libndpi.a" + elif test -r /usr/local/nDPI/lib/libndpi.a; then + NDPI_LIBS_STATIC="/usr/local/nDPI/lib/libndpi.a" + else + AC_MSG_ERROR([ERROR: missing nDPI static library]) + fi + + AC_SUBST([NDPI_LIBS_STATIC]) + ;; + no) + AC_MSG_RESULT(no) + ;; + esac ], + [ + AC_MSG_RESULT(no) + ] +) +dnl finish: nDPI handling + if test 
x"$USING_DLOPEN" = x"yes"; then AC_DEFINE(HAVE_DLOPEN, 1) else @@ -932,6 +1042,7 @@ [ AC_MSG_RESULT(yes) AC_DEFINE(HAVE_64BIT_COUNTERS, 1) + COMPILE_ARGS="${COMPILE_ARGS} '--enable-64bit'" ] ) @@ -971,6 +1082,7 @@ LIBS="${LIBS} -lpthread" USING_THREADPOOL=yes + COMPILE_ARGS="${COMPILE_ARGS} '--enable-threads'" ] ) @@ -990,6 +1102,86 @@ esac ], AC_MSG_RESULT(no)) +AC_MSG_CHECKING(whether to link IPv4/IPv6 traffic accounting accounting binaries) +AC_ARG_ENABLE(traffic-bins, + [ --enable-traffic-bins Link IPv4/IPv6 traffic accounting binaries (default: yes)], + [ + if test x$enableval = x"yes" ; then + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_TRAFFIC_BINS, 1) + USING_TRAFFIC_BINS="yes" + else + AC_MSG_RESULT(no) + fi + ], + [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_TRAFFIC_BINS, 1) + USING_TRAFFIC_BINS="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-traffic-bins'" + ] +) + +AC_MSG_CHECKING(whether to link BGP daemon binaries) +AC_ARG_ENABLE(bgp-bins, + [ --enable-bgp-bins Link BGP daemon binaries (default: yes)], + [ + if test x$enableval = x"yes" ; then + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_BGP_BINS, 1) + USING_BGP_BINS="yes" + else + AC_MSG_RESULT(no) + fi + ], + [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_BGP_BINS, 1) + USING_BGP_BINS="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-bgp-bins'" + ] +) + +AC_MSG_CHECKING(whether to link BMP daemon binaries) +AC_ARG_ENABLE(bmp-bins, + [ --enable-bmp-bins Link BMP daemon binaries (default: yes)], + [ + if test x$enableval = x"yes" ; then + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_BMP_BINS, 1) + USING_BMP_BINS="yes" + else + AC_MSG_RESULT(no) + fi + ], + [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_BMP_BINS, 1) + USING_BMP_BINS="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-bmp-bins'" + ] +) + +AC_MSG_CHECKING(whether to link Streaming Telemetry daemon binaries) +AC_ARG_ENABLE(st-bins, + [ --enable-st-bins Link Streaming Telemetry daemon binaries (default: yes)], + [ + if test x$enableval = x"yes" ; then + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_ST_BINS, 1) + USING_ST_BINS="yes" + else + AC_MSG_RESULT(no) + fi + ], + [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_ST_BINS, 1) + USING_ST_BINS="yes" + COMPILE_ARGS="${COMPILE_ARGS} '--enable-st-bins'" + ] +) + dnl Checks for library functions. AC_TYPE_SIGNAL @@ -997,6 +1189,7 @@ dnl final checks dnl trivial solution to portability issue +AC_DEFINE_UNQUOTED(COMPILE_ARGS, "$COMPILE_ARGS") CFLAGS="${CFLAGS} ${INCLUDES}" INCLUDES="" @@ -1008,6 +1201,7 @@ LIBS ......... : ${LIBS} LDFLAGS ...... : ${LDFLAGS} PLUGINS ...... : ${PLUGINS} +SUPPORTS ..... : ${SUPPORTS} Now type 'make' to compile the source code. 
@@ -1030,13 +1224,20 @@ AM_CONDITIONAL([WITH_MONGODB], [test x"$USING_MONGODB" = x"yes"]) AM_CONDITIONAL([WITH_SQLITE3], [test x"$USING_SQLITE3" = x"yes"]) AM_CONDITIONAL([WITH_RABBITMQ], [test x"$USING_RABBITMQ" = x"yes"]) +AM_CONDITIONAL([WITH_ZMQ], [test x"$USING_ZMQ" = x"yes"]) AM_CONDITIONAL([WITH_KAFKA], [test x"$USING_KAFKA" = x"yes"]) AM_CONDITIONAL([USING_SQL], [test x"$USING_SQL" = x"yes"]) AM_CONDITIONAL([USING_THREADPOOL], [test x"$USING_THREADPOOL" = x"yes"]) -AM_CONDITIONAL([WITH_NFLOG], [test x"$USING_NFLOG" = x"yes"]) AM_CONDITIONAL([WITH_AVRO], [test x"$USING_AVRO" = x"yes"]) +AM_CONDITIONAL([WITH_NDPI], [test x"$USING_NDPI" = x"yes"]) +AM_CONDITIONAL([WITH_NFLOG], [test x"$USING_NFLOG" = x"yes"]) +AM_CONDITIONAL([USING_TRAFFIC_BINS], [test x"$USING_TRAFFIC_BINS" = x"yes"]) +AM_CONDITIONAL([USING_BGP_BINS], [test x"$USING_BGP_BINS" = x"yes"]) +AM_CONDITIONAL([USING_BMP_BINS], [test x"$USING_BMP_BINS" = x"yes"]) +AM_CONDITIONAL([USING_ST_BINS], [test x"$USING_ST_BINS" = x"yes"]) AC_OUTPUT([ Makefile \ src/Makefile src/nfprobe_plugin/Makefile \ src/sfprobe_plugin/Makefile src/bgp/Makefile \ src/tee_plugin/Makefile src/isis/Makefile \ - src/bmp/Makefile src/telemetry/Makefile ]) + src/bmp/Makefile src/telemetry/Makefile \ + src/ndpi/Makefile ]) diff -Nru pmacct-1.6.1/COPYING pmacct-1.7.0/COPYING --- pmacct-1.6.1/COPYING 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/COPYING 2017-10-20 16:56:19.000000000 +0000 @@ -1,5 +1,5 @@ -pmacct (Promiscuous mode IP Accounting package) -pmacct is Copyright (C) 2003-2016 by Paolo Lucente +pmacct [IP traffic accounting : BGP : BMP : IGP : Streaming Telemetry] +pmacct is Copyright (C) 2003-2017 by Paolo Lucente GNU GENERAL PUBLIC LICENSE diff -Nru pmacct-1.6.1/debian/changelog pmacct-1.7.0/debian/changelog --- pmacct-1.6.1/debian/changelog 2016-11-26 22:39:01.000000000 +0000 +++ pmacct-1.7.0/debian/changelog 2017-10-22 12:39:05.000000000 +0000 @@ -1,3 +1,13 @@ +pmacct (1.7.0-1) unstable; urgency=medium + + * [09997b1] Updated version 1.7.0 from 'upstream/1.7.0' + with Debian dir 7687eb365838d66f3b109a1d516d0fdf991f05d1 + * [8cc2d9a] Add new build-deps and configure flags + * [e14a802] Use libmaxminddb (geoip2) + Thanks to Faidon Liambotis (Closes: #863137) + + -- Bernd Zeimetz Sun, 22 Oct 2017 14:39:05 +0200 + pmacct (1.6.1-1) unstable; urgency=medium * [0497b6e] Merge tag 'upstream/1.6.1' diff -Nru pmacct-1.6.1/debian/control pmacct-1.7.0/debian/control --- pmacct-1.6.1/debian/control 2016-11-26 22:39:01.000000000 +0000 +++ pmacct-1.7.0/debian/control 2017-10-22 12:39:05.000000000 +0000 @@ -9,11 +9,13 @@ default-libmysqlclient-dev, libsqlite3-dev, kfreebsd-kernel-headers[kfreebsd-any], - libgeoip-dev, librabbitmq-dev, libjansson-dev, librdkafka-dev, libnetfilter-log-dev, + libmaxminddb-dev, + libjansson-dev, + libzmq3-dev, autotools-dev, pkg-config, dh-systemd diff -Nru pmacct-1.6.1/debian/rules pmacct-1.7.0/debian/rules --- pmacct-1.6.1/debian/rules 2016-11-26 22:39:01.000000000 +0000 +++ pmacct-1.7.0/debian/rules 2017-10-22 12:39:05.000000000 +0000 @@ -12,21 +12,33 @@ override_dh_auto_configure: dh_auto_configure -- \ - --enable-mmap \ - --enable-pgsql \ --with-pgsql-includes=`pg_config --includedir` \ - --enable-mysql \ - --enable-sqlite3 \ - --enable-ipv6 \ - --enable-v4-mapped \ - --enable-64bit \ - --enable-threads \ - --enable-jansson \ - --enable-geoip \ - --enable-rabbitmq \ - --enable-kafka \ + --enable-l2 \ + --enable-ipv6 \ + --enable-plabel \ + --enable-mysql \ + --enable-pgsql \ + --enable-sqlite3 \ + --enable-rabbitmq \ 
+ --enable-zmq \ + --enable-kafka \ + --enable-geoipv2 \ + --enable-jansson \ + --enable-64bit \ + --enable-threads \ + --enable-traffic-bins \ + --enable-bgp-bins \ + --enable-bmp-bins \ + --enable-st-bins \ $(extra_confflags) + # build-deps not available in debian + # --enable-avro \ + # does not support the current version + # --enable-mongodb \ + # libndpi outdated + # --enable-ndpi \ + override_dh_installinit: dh_installinit --name pmacctd --no-start dh_installinit --name nfacctd --no-start diff -Nru pmacct-1.6.1/docs/INTERNALS pmacct-1.7.0/docs/INTERNALS --- pmacct-1.6.1/docs/INTERNALS 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/docs/INTERNALS 2017-10-20 16:56:19.000000000 +0000 @@ -1,4 +1,4 @@ -(poorman's) TABLE OF CONTENTS: +TABLE OF CONTENTS: I. Introduction II. Primitives III. The whole picture @@ -13,13 +13,10 @@ I. Introduction -Giving a quick look to the old 'INTERNALS' textfile, this new one starts with a big step -forward: a rough table of contents, though the document is still not fancy nor formatted. -I'm also conscious the package is still missing its man page. The goal of this document -would be an 'as much as possible' careful description of the development paths, trying to -expose the work done to constructive critics. -Since March 2005, this document is complemented by a paper about an architectural overview -of the project 'pmacct: steps forward interface counters'; the referred paper is available +The goal of this document would be to give extra insight on some of the internals of +pmacct (useful for development or simply constructive critics). Since March 2005, +this document is complemented by a paper about an architectural overview of the +project 'pmacct: steps forward interface counters'; the referred paper is available for download at the pmacct homepage. @@ -74,7 +71,7 @@ | | | [ handle ] [ handle ] [ handle ] [ handle ] [ handle ] | | ... ====[ link layer ]=====[ IP layer ]====[ fragments ]==== [ flows ]==== [ classification ] ... | - | ll.c nl.c ip_frag.c ip_flow.c classifier.c | + | ll.c nl.c ip_frag.c ip_flow.c nDPI library | | | | [ handle ] [ Correlate ] | | ... ====[ maps ]===== [ BGP, IGP ] ... | @@ -217,6 +214,7 @@ memory new chunks of memory are allocated and added to the list during the execution. Using a fixed table places a maximum limit to the number of entries the table is able to store; the following calculation may help in building a fixed table: + ES (Entry Size) ~ 50 bytes NE (Number of entries) @@ -224,6 +222,10 @@ Default values are: imt_mem_pools_number = 16; imt_mem_pools_size = 8192; this will let the default fixed table to contain a maximum of slightly more than 2600 aggregates. +However note the entry size is indicative and can very consistently, ie. depending if +IPv6 or Layer2 are enabled at compile time or whether BGP, MPLS, NAT, etc. primitives +are in use as part of the aggregation key. When a fixed size table is needed, it is +better to constrain it on the size rather than the estimated number of entries to fit. IMT plugin does not rely any way over the realloc() function, but only mmap(). Table grows and shrinks with the help of the above described tracking structures. This is @@ -356,9 +358,10 @@ IX. Classifier and connection tracking engines -pmacct 0.10.0 sees the introduction of new packet/stream classification and connection tracking -features in the pmacctd daemon. 
Firstly, let's give a look to the global picture; then how they -work: +Classification and connection tracking features were introduced in pmacctd and uacctd daemons as +early as 0.10.0 release. As of pmacct 1.7, classification is switched from the mixed home-grown +implementation + L7 layer project to the nDPI library. Firstly, let's give a look to the global +picture; then how they work: ----[ pmacctd loop ]------------------------------------------------------------- | [ regular ] | @@ -371,7 +374,7 @@ | [ fragment ] [ flow ] [ flow ] [ connection ] | | ... ==>[ handling ]==>[ handling ]==>[ classification ]==>[ tracking ]==> ... | | [ engine ] [ engine ] [ engine ] [ engine ] | - | ip_frag.c ip_flow.c classifier.c \ conntrack.c | + | ip_frag.c ip_flow.c nDPI library \ conntrack.c | | | \___ | | \ \ | | \ [ shared ] | @@ -393,11 +396,9 @@ a protocol which is known to be based over a control channel (ie. FTP, RTSP, SIP, H.323, etc.). However, some protocols (ie. MSN messenger) spawn data channels that can still be distinguished because of some regular patterns into the payload; in such cases a classificator exists rather -than a tracking module. Connection tracking modules are C routines statically compiled into the -collector code that hint IP address/port couples for upcoming data streams as signalled by one -of the parties into the control channel; such information fragments are then meant to classify -the new data streams; classification patterns are either regular expressions (RE) or pluggable -shared objects (SO, written in C), both loaded at runtime. +than a tracking module. Connection tracking modules hint IP address/port couples for upcoming +data streams as signalled by one of the parties into the control channel; such pieces of +information are then meant to classify the new data streams. In this context, 'snaplen' directive, which specifies the maximum number of bytes to capture for each packet, has key importance. In fact, some protocols (mostly text-based eg. RTSP, SIP, etc.) benefit of extra bytes because they give more chances to identify new data streams spawned by diff -Nru pmacct-1.6.1/docs/MSGLOG_DUMP_FORMATS pmacct-1.7.0/docs/MSGLOG_DUMP_FORMATS --- pmacct-1.6.1/docs/MSGLOG_DUMP_FORMATS 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/docs/MSGLOG_DUMP_FORMATS 2017-10-20 16:56:19.000000000 +0000 @@ -5,100 +5,217 @@ introduced. 
BGP msglog format: -* log_init message: - {"seq": , "timestamp": , "peer_ip_src": , \ - "event_type": "log_init"} - -* log message: - {"seq": , "timestamp": , "peer_ip_src": , \ - "event_type": "log", "log_type": <"update", "withdraw", "delete">, \ - } - -* log_close message: - {"seq": , "timestamp": , "peer_ip_src": , \ - "event_type": "log_close"} +- log_init message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "peer_ip_src": , + "event_type": "log_init" + } + +- log message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "peer_ip_src": , + "event_type": "log", + "afi": , + "safi": , + "log_type": <"update", "withdraw", "delete">, + + } + +- log_close message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "peer_ip_src": , + "event_type": "log_close" + } BGP dump format: -* dump_init message: - {"timestamp": , "peer_ip_src": , \ - "event_type": "dump_init", "dump_period": } - -* dump message: - {"timestamp": , "peer_ip_src": , \ - "event_type": "dump", } - -* dump_close message: - {"timestamp": , "peer_ip_src": , \ - "event_type": "dump_close"} +- dump_init message: + { + "writer_id": "/" (*), + "timestamp": , + "peer_ip_src": , + "event_type": "dump_init", + "dump_period": + } + +- dump message: + { + "writer_id": "/" (*), + "timestamp": , + "peer_ip_src": , + "event_type": "dump", + "afi": , + "safi": , + + } + +- dump_close message: + { + "writer_id": "/" (*), + "timestamp": , + "peer_ip_src": , + "event_type": "dump_close" + } BMP msglog format: -* log_init message: - {"seq": , "timestamp": , "bmp_router": , \ - "event_type": "log_init"} - -* log routes message: - {"seq": , "timestamp": , "bmp_router": , \ - "event_type": "log", "bmp_msg_type": "route_monitor", "log_type": <"update", \ - "withdraw", "delete">, "peer_ip": , } - -* log events message: - {"seq": , "timestamp": , "bmp_router": , \ - "event_type": "log", "bmp_msg_type": <"init", "term", "peer_up", "stats", \ - "peer_down">, "peer_ip": , } - -* log_close message: - {"seq": , "timestamp": , "bmp_router": , \ - "event_type": "log_close"} +- log_init message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "bmp_router": , + "event_type": "log_init" + } + +- log routes message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "bmp_router": , + "event_type": "log", + "afi": , + "safi": , + "bmp_msg_type": "route_monitor", "log_type": <"update", "withdraw", "delete">, + "peer_ip": , + + } + +- log events message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "bmp_router": , + "event_type": "log", + "bmp_msg_type": <"init", "term", "peer_up", "stats", "peer_down">, + "peer_ip": , + + } + +- log_close message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "bmp_router": , + "event_type": "log_close" + } BMP dump format: -* dump_init message: - {"timestamp": , "bmp_router": , \ - "event_type": "dump_init", "dump_period": } - -* dump routes message: - {"timestamp": , "bmp_router": , \ - "bmp_msg_type": "route_monitor", "event_type": "dump", \ - "peer_ip": , } - -* dump events message: - {"seq": , "timestamp": , "bmp_router": , \ - "event_type": "dump", "event_timestamp": , "bmp_msg_type": \ - <"init", "term", "peer_up", "stats", "peer_down">, "peer_ip": , \ - } - -* dump_close message: - {"timestamp": , "bmp_router": , \ - "event_type": "dump_close"} +- dump_init message: + { + "writer_id": "/" (*), + "timestamp": , + "bmp_router": , + "event_type": "dump_init", + "dump_period": + } + +- dump routes message: + { + "writer_id": "/" (*), + "timestamp": , + 
"bmp_router": , + "bmp_msg_type": "route_monitor", + "event_type": "dump", + "afi": , + "safi": , + "peer_ip": , + + } + +- dump events message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "bmp_router": , + "event_type": "dump", + "event_timestamp": , + "bmp_msg_type": <"init", "term", "peer_up", "stats", "peer_down">, + "peer_ip": , + + } + +- dump_close message: + { + "writer_id": "/" (*), + "timestamp": , + "bmp_router": , + "event_type": "dump_close" + } Streaming Telemetry msglog format: -* log_init message: - {"seq": , "timestamp": , "telemetry_node": , \ - "event_type": "log_init"} - -* log message: - {"seq": , "timestamp": , "telemetry_node": , \ - "event_type": "log", "telemetry_port": , "serialization": \ - <"json" | "gpb">, "telemetry_data": } - -* log_close message: - {"seq": , "timestamp": , "telemetry_node": , \ - "event_type": "log_close"} +- log_init message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "telemetry_node": , + "event_type": "log_init" + } + +- log message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "telemetry_node": , + "event_type": "log", + "telemetry_port": , + "serialization": <"json" | "gpb">, + "telemetry_data": + } + +- log_close message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "telemetry_node": , + "event_type": "log_close" + } Streaming Telemetry dump format: -* dump_init message: - {"timestamp": , "telemetry_node": , \ - "event_type": "dump_init", "dump_period": } - -* dump message: - {"seq": , "timestamp": , "telemetry_node": \ - , "event_type": "dump", "telemetry_port": , \ - "serialization": <"json" | "gpb">, "telemetry_data": } - -* dump_close message: - {"timestamp": , "telemetry_node": , \ - "event_type": "dump_close"} +- dump_init message: + { + "timestamp": , + "telemetry_node": , + "event_type": "dump_init", + "dump_period": + } + +- dump message: + { + "seq": , + "writer_id": "/" (*), + "timestamp": , + "telemetry_node": , + "event_type": "dump", + "telemetry_port": , + "serialization": <"json" | "gpb">, + "telemetry_data": + } + +- dump_close message: + { + "writer_id": "/" (*), + "timestamp": , + "telemetry_node": , + "event_type": "dump_close" + } + +(*) Field included only when writing to a RabbitMQ or Kafka broker diff -Nru pmacct-1.6.1/docs/SIGNALS pmacct-1.7.0/docs/SIGNALS --- pmacct-1.6.1/docs/SIGNALS 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/docs/SIGNALS 2017-10-20 16:56:19.000000000 +0000 @@ -1,10 +1,13 @@ SIGNALS: -Here follows a list of supported signals and their meaning; note: pmacct core -process says goodbye when its last child dies or is terminated. +Here follows a list of supported signals and their meaning. If a signal is +not being properly delivered to the daemon, and this is on a system running +SELinux, check for SELinux interferences. Core process: -SIGCHLD: used to handle gracefully his loved child processes; +SIGCHLD: used to handle gracefully his loved child processes. This is + internal, ie. should not be sent by users. To end gracefully + the daemon, look at SIGINT; SIGHUP: reopens the logging infrastructure. Works with both syslog and logfiles; it also works with streamed logging of BGP messages/ events (bgp_daemon_msglog_file), streamed logging of BMP data/ @@ -17,12 +20,12 @@ SIGUSR2: if 'maps_refresh' config directive is enabled, it causes maps to be reloaded (ie. pre_tag_map, bgp_agent_map, etc.). If also indexing is enabled, ie. maps_index, indexes are re-compited. 
-SIGINT: if starting pmacct in foreground the signal is propagated to - each running plugin, which is in turn gracefully terminated; - if starting pmacct in background, this signal is ignored by - the Core process but not from the plugins: it is recommended - to send the signal to all plugins, ie. "killall -INT pmacctd" - so to let the whole pmacct instance exit gracefully. +SIGINT: the signal is used by the Core Process itself and propagated + to each running plugin for graceful termination (ie. send BGP + NOTIFICATION message to established BGP sessions, close open + files, remove PID files, purge data, etc.). See Q16 of the + FAQS document for recommendations on how to best send SIGINT + signals to the daemon; SIGTERM: not handled (which means it follows the default behaviour for the OS) if the daemon is started in background; otherwise it orks like SIGINT; diff -Nru pmacct-1.6.1/docs/TRIGGER_VARS pmacct-1.7.0/docs/TRIGGER_VARS --- pmacct-1.6.1/docs/TRIGGER_VARS 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/docs/TRIGGER_VARS 2017-10-20 16:56:19.000000000 +0000 @@ -1,14 +1,13 @@ INTRODUCTION -An executable triggering mechanism feature is part of all SQL plugins -(sql_trigger_exec). Executables may either be spawned each time a cache -purging event occurs or at arbitrary time intervals (that are specified -via sql_trigger_time). Because the triggering mechanism is hooked on top -of the 'lazy deadlines' plugin concept, it should not be preferred method -to run tasks strictly connected to timing issues (use crontab instead). -As a recap, the concept of lazy deadlines was introduced a while ago to -avoid large use of UNIX signals for precise time handling. Information -is being passed to the triggered executable in the form of environment -variables. The list of supported variables follows: +A feature to spawn external executables is part of all pmacct plugins +(ie. sql_trigger_exec, print_trigger_exec, etc). In case of SQL plugins, +executables may either be spawned each time a cache purging event occurs +or at arbitrary time intervals (specified via sql_trigger_time); in all +other plugins a trigger can be spawned only at a cache purging event. +For time-sensitive triggers it is recommended to use crontab instead. +Also, in case of SQL plugins some information is being passed to the +triggered executable in the form of environment variables. The list of +supported variables follows: VAR: $SQL_DB DESC: RDBMS database name. diff -Nru pmacct-1.6.1/examples/agent_to_peer.map.example pmacct-1.7.0/examples/agent_to_peer.map.example --- pmacct-1.6.1/examples/agent_to_peer.map.example 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/examples/agent_to_peer.map.example 2017-10-20 16:56:19.000000000 +0000 @@ -8,7 +8,7 @@ ! ! list of currently supported keys follow: ! -! 'bgp_ip' LOOKUP: IPv4/IPv6 session address or router ID of the +! 'bgp_ip' LOOKUP: IPv4/IPv6 session address or Router ID of the ! BGP peer. ! 'bgp_port' LOOKUP: TCP port used by the BGP peer to establish the ! session, useful in NAT traversal scenarios. @@ -24,12 +24,12 @@ ! 'filter' MATCH: incoming data is compared against the supplied ! filter expression (expected in libpcap syntax); the ! filter needs to be enclosed in quotes ('). In this map -! this is meant to discriminate among IPv4 ('ip') and -! IPv6 ('ip6') traffic. +! this is meant to discriminate among IPv4 ('ip', 'vlan +! and ip') and IPv6 ('ip6', 'vlan and ip6') traffic. ! ! A couple of straightforward examples follow. ! 
-bgp_ip=1.2.3.4 ip=2.3.4.5 +bgp_ip=1.2.3.4 ip=2.3.4.5 ! ! The following maps something which any Netflow/sFlow agent to the specified ! BGP peer. This syntax applies also to non-telemetry daemons, ie. pmacctd and @@ -38,7 +38,16 @@ ! bgp_ip=4.5.6.7 ip=0.0.0.0/0 ! ! The following maps flows ingressing a specific interface of the NetFlow/sFlow -! agent to the specified BGP peer. This is relevant to VPN scenarios. +! agent to the specified BGP peer. This may be relevant to MPLS VPN scenarios. ! -bgp_ip=1.2.3.4 ip=2.3.4.5 in=100 +! bgp_ip=1.2.3.4 ip=2.3.4.5 in=100 ! +! In scenarios where there are distinct v4 and v6 BGP sessions with the same +! peer (by design or due to distinct BGP agents for v4 and v6), traffic can +! be directed onto the right session with a filter. pmacct needs somehow to +! distinguish the sessions to make the correlation properly work: if the IP +! address of the BGP sessions is the same, ie. pmacct is co-located with the +! BGP agent, the peers will need to have a different Router ID configured: +! +! bgp_ip=4.0.0.1 ip=0.0.0.0/0 filter='ip or (vlan and ip)' +! bgp_ip=6.0.0.1 ip=0.0.0.0/0 filter='ip6 or (vlan and ip6)' diff -Nru pmacct-1.6.1/examples/amqp/amqp_receiver.py pmacct-1.7.0/examples/amqp/amqp_receiver.py --- pmacct-1.6.1/examples/amqp/amqp_receiver.py 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/examples/amqp/amqp_receiver.py 2017-10-20 16:56:19.000000000 +0000 @@ -1,17 +1,20 @@ #!/usr/bin/env python # -# If missing 'pika' read how to download it at: +# Pika is a pure-Python implementation of the AMQP 0-9-1 protocol and +# is available at: +# https://pypi.python.org/pypi/pika # http://www.rabbitmq.com/tutorials/tutorial-one-python.html # +# UltraJSON, an ultra fast JSON encoder and decoder, is available at: +# https://pypi.python.org/pypi/ujson +# +# The Apache Avro Python module is available at: +# https://avro.apache.org/docs/1.8.1/gettingstartedpython.html +# # Binding to the routing key specified by amqp_routing_key (by default 'acct') # allows to receive messages published by an 'amqp' plugin, in JSON format. # Similarly for BGP daemon bgp_*_routing_key and BMP daemon bmp_*_routing_key. # -# Binding to the routing key specified by plugin_pipe_amqp_routing_key (by -# default 'core_proc_name-$plugin_name-$plugin_type') allows to receive a copy -# of messages published by the Core Process to a specific plugin; the messages -# are in binary format, first quad being the sequence number. -# # Binding to the reserved exchange 'amq.rabbitmq.trace' and to routing keys # 'publish.pmacct' or 'deliver.' 
allows to receive a copy of the # messages that published via a specific exchange or delivered to a specific @@ -20,8 +23,17 @@ # # 'rabbitmqctl trace_on' enables RabbitMQ Firehose tracer # 'rabbitmqctl list_queues' lists declared queues +# +# Two pipelines are supported in this script: +# * RabbitMQ -> REST API +# * RabbitMQ -> stdout +# +# Two data encoding formats are supported in this script: +# * JSON +# * Apache Avro -import sys, os, getopt, pika, StringIO +import sys, os, getopt, pika, StringIO, time +import ujson as json try: import avro.io @@ -32,6 +44,14 @@ avro_available = False avro_schema = None +http_url_post = None +print_stdout = 0 +print_stdout_num = 0 +print_stdout_max = 0 +convert_to_json_array = 0 +stats_interval = 0 +time_count = 0 +elem_count = 0 def usage(tool): print "" @@ -46,27 +66,114 @@ print "Optional Args:" print " -h, --help".ljust(25) + "Print this help" print " -H, --host".ljust(25) + "Define RabbitMQ broker host [default: 'localhost']" + print " -p, --print".ljust(25) + "Print data to stdout" + print " -n, --num".ljust(25) + "Number of rows to print to stdout [default: 0, ie. forever]" + print " -u, --url".ljust(25) + "Define a URL to HTTP POST data to" + print " -a, --to-json-array".ljust(25) + "Convert list of newline-separated JSON objects in a JSON array" + print " -s, --stats-interval".ljust(25) + "Define a time interval, in secs, to get statistics to stdout" if avro_available: print " -d, --decode-with-avro".ljust(25) + "Define the file with the " \ "schema to use for decoding Avro messages" +def post_to_url(http_req, value): + try: + urllib2.urlopen(http_req, value) + except urllib2.HTTPError, err: + print "WARN: urlopen() returned HTTP error code:", err.code + sys.stdout.flush() + except urllib2.URLError, err: + print "WARN: urlopen() returned URL error reason:", err.reason + sys.stdout.flush() + def callback(ch, method, properties, body): + global avro_schema + global http_url_post + global print_stdout + global print_stdout_num + global print_stdout_max + global convert_to_json_array + global stats_interval + global time_count + global elem_count + + # + # XXX: data enrichments, manipulations, correlations, etc. 
go here + # + + if stats_interval: + time_now = int(time.time()) + if avro_schema: inputio = StringIO.StringIO(body) decoder = avro.io.BinaryDecoder(inputio) datum_reader = avro.io.DatumReader(avro_schema) + avro_data = [] while inputio.tell() < len(inputio.getvalue()): x = datum_reader.read(decoder) avro_data.append(str(x)) - print " [x] Received %r" % (",".join(avro_data),) + + if stats_interval: + elem_count += len(avro_data) + + if print_stdout: + print " [x] Received %r" % (",".join(avro_data),) + sys.stdout.flush() + print_stdout_num += 1 + if (print_stdout_max == print_stdout_num): + sys.exit(0) + + if http_url_post: + http_req = urllib2.Request(http_url_post) + http_req.add_header('Content-Type', 'application/json') + post_to_url(http_req, ("\n".join(avro_data))) else: - print " [x] Received %r" % (body,) + value = body + + if stats_interval: + elem_count += value.count('\n') + elem_count += 1 + + if convert_to_json_array: + value = "[" + value + "]" + value = value.replace('\n', ',\n') + value = value.replace(',\n]', ']') + + if print_stdout: + print " [x] Received %r" % (value,) + sys.stdout.flush() + print_stdout_num += 1 + if (print_stdout_max == print_stdout_num): + sys.exit(0) + + if http_url_post: + http_req = urllib2.Request(http_url_post) + http_req.add_header('Content-Type', 'application/json') + post_to_url(http_req, value) + + if stats_interval: + if time_now >= (time_count + stats_interval): + print("INFO: stats: [ interval=%d records=%d ]" % (stats_interval, elem_count)) + sys.stdout.flush() + time_count = time_now + elem_count = 0 def main(): + global avro_schema + global http_url_post + global print_stdout + global print_stdout_num + global print_stdout_max + global convert_to_json_array + global stats_interval + global time_count + global elem_count + try: - opts, args = getopt.getopt(sys.argv[1:], "he:k:q:H:d:", ["help", "exchange=", - "routing_key=", "queue=", "host=", "decode-with-avro="]) + opts, args = getopt.getopt(sys.argv[1:], "he:k:q:H:u:d:pn:as:", ["help", + "exchange=", "routing_key=", "queue=", "host=", "url=", + "decode-with-avro=", "print=", "num=", "to-json-array=", + "stats-interval="]) except getopt.GetoptError as err: # print help information and exit: print str(err) # will print something like "option -a not recognized" @@ -95,6 +202,19 @@ amqp_queue = a elif o in ("-H", "--host"): amqp_host = a + elif o in ("-u", "--url"): + http_url_post = a + elif o in ("-p", "--print"): + print_stdout = 1 + elif o in ("-n", "--num"): + print_stdout_max = int(a) + elif o in ("-a", "--to-json-array"): + convert_to_json_array = 1 + elif o in ("-s", "--stats-interval"): + stats_interval = int(a) + if stats_interval < 0: + sys.stderr.write("ERROR: `--stats-interval` must be positive\n") + sys.exit(1) elif o in ("-d", "--decode-with-avro"): if not avro_available: sys.stderr.write("ERROR: `--decode-with-avro` given but Avro package was " @@ -105,8 +225,6 @@ sys.stderr.write("ERROR: '%s' does not exist or is not a file\n" % (a,)) sys.exit(1) - global avro_schema - with open(a) as f: avro_schema = avro.schema.parse(f.read()) else: @@ -128,8 +246,14 @@ channel.queue_bind(exchange=amqp_exchange, routing_key=amqp_routing_key, queue=amqp_queue) - print ' [*] Example inspired from: http://www.rabbitmq.com/getstarted.html' - print ' [*] Waiting for messages on E =', amqp_exchange, ',', amqp_type, 'RK =', amqp_routing_key, 'Q =', amqp_queue, 'H =', amqp_host, '. Edit code to change any parameter. 
To exit press CTRL+C' + if print_stdout: + print ' [*] Example inspired from: http://www.rabbitmq.com/getstarted.html' + print ' [*] Waiting for messages on E =', amqp_exchange, ',', amqp_type, 'RK =', amqp_routing_key, 'Q =', amqp_queue, 'H =', amqp_host, '. Edit code to change any parameter. To exit press CTRL+C' + sys.stdout.flush() + + if stats_interval: + elem_count = 0 + time_count = int(time.time()) channel.basic_consume(callback, queue=amqp_queue, no_ack=True) diff -Nru pmacct-1.6.1/examples/avro/avro_file_decoder.py pmacct-1.7.0/examples/avro/avro_file_decoder.py --- pmacct-1.6.1/examples/avro/avro_file_decoder.py 1970-01-01 00:00:00.000000000 +0000 +++ pmacct-1.7.0/examples/avro/avro_file_decoder.py 2017-10-20 16:56:19.000000000 +0000 @@ -0,0 +1,76 @@ +#!/usr/bin/env python +# +# If missing 'avro' read how to download it at: +# https://avro.apache.org/docs/1.8.1/gettingstartedpython.html + +import sys, os, getopt, io +from avro.datafile import DataFileReader +from avro.io import DatumReader +import avro.schema + +def usage(tool): + print "" + print "Usage: %s [Args]" % tool + print "" + + print "Mandatory Args:" + print " -i, --input-file".ljust(25) + "Input file in Avro format" + print " -s, --schema".ljust(25) + "Schema to decode input file (if not included)" + print "" + print "Optional Args:" + print " -h, --help".ljust(25) + "Print this help" + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], "hi:s:", ["help", "input-file=", + "schema="]) + except getopt.GetoptError as err: + # print help information and exit: + print str(err) # will print something like "option -a not recognized" + usage(sys.argv[0]) + sys.exit(2) + + avro_file = None + avro_schema_file = None + + required_cl = 0 + + for o, a in opts: + if o in ("-h", "--help"): + usage(sys.argv[0]) + sys.exit() + elif o in ("-i", "--input-file"): + required_cl += 1 + avro_file = a + elif o in ("-s", "--schema"): + avro_schema_file = a + else: + assert False, "unhandled option" + + if (required_cl < 1): + print "ERROR: Missing required argument" + usage(sys.argv[0]) + sys.exit(1) + + if not avro_schema_file: + reader = DataFileReader(open(avro_file, "r"), DatumReader()) + for datum in reader: + print datum + reader.close() + else: + reader_schema = open(avro_schema_file, "r") + avro_schema = reader_schema.read() + reader_schema.close() + parsed_avro_schema = avro.schema.parse(avro_schema) + + with open(avro_file, "rb") as reader_data: + inputio = io.BytesIO(reader_data.read()) + decoder = avro.io.BinaryDecoder(inputio) + reader = avro.io.DatumReader(parsed_avro_schema) + while inputio.tell() < len(inputio.getvalue()): + avro_datum = reader.read(decoder) + print avro_datum + reader_data.close() + +if __name__ == "__main__": + main() diff -Nru pmacct-1.6.1/examples/flow_to_rd.map.example pmacct-1.7.0/examples/flow_to_rd.map.example --- pmacct-1.6.1/examples/flow_to_rd.map.example 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/examples/flow_to_rd.map.example 2017-10-20 16:56:19.000000000 +0000 @@ -22,6 +22,9 @@ ! MPLS-enabled networks this can be also matched against ! top label address where available (ie. egress NetFlow ! v9/IPFIX exports). +! 'mpls_vpn_id' MATCH: MPLS VPN ID. A positive 32-bit unsigned integer +! is expected as value. In NetFlow/IPFIX this is compared +! against field types #234 and #235. ! 'mpls_label_bottom' MATCH: MPLS bottom label value. ! ! A couple of straightforward examples follow. 
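As a worked illustration of the 'mpls_vpn_id' key documented in the hunk above, a
flow_to_rd map entry along the following lines would tag flows whose NetFlow/IPFIX
field type #234/#235 carries VPN ID 100 with a given Route Distinguisher; the RD,
agent address and VPN ID are placeholder values, not taken from the example file:

! id=0:65512:1		ip=192.168.1.1		mpls_vpn_id=100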
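The Kafka consumer scripts introduced below expect records published by a pmacct
'kafka' plugin. A minimal collector configuration sketch for that purpose might look
as follows; the aggregation method, broker address and refresh time are illustrative
assumptions to adapt to the local setup, with 'acct' matching the default topic the
scripts refer to:

  plugins: kafka
  aggregate: src_host, dst_host, src_port, dst_port, proto
  kafka_topic: acct
  kafka_broker_host: 127.0.0.1
  kafka_broker_port: 9092
  kafka_refresh_time: 60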
diff -Nru pmacct-1.6.1/examples/kafka/kafka_confluent_consumer.py pmacct-1.7.0/examples/kafka/kafka_confluent_consumer.py --- pmacct-1.6.1/examples/kafka/kafka_confluent_consumer.py 1970-01-01 00:00:00.000000000 +0000 +++ pmacct-1.7.0/examples/kafka/kafka_confluent_consumer.py 2017-10-20 16:56:19.000000000 +0000 @@ -0,0 +1,183 @@ +#!/usr/bin/env python +# +# Confluent Kafka Python module is available at: +# https://github.com/confluentinc/confluent-kafka-python +# +# UltraJSON, an ultra fast JSON encoder and decoder, is available at: +# https://pypi.python.org/pypi/ujson +# +# Binding to the topic specified by kafka_topic (by default 'acct') allows to +# receive messages published by a 'kafka' plugin, in JSON format. Similarly for +# BGP daemon bgp_*_topic and BMP daemon bmp_*_topic. +# +# Three pipelines are supported in this script: +# * Kafka -> Kafka +# * Kafka -> REST API +# * Kafka -> stdout +# +# A single data encoding format is supported in this script: +# * JSON + +import sys, os, getopt, StringIO, time, urllib2 +import confluent_kafka +import ujson as json +import uuid + +def usage(tool): + print "" + print "Usage: %s [Args]" % tool + print "" + + print "Mandatory Args:" + print " -t, --topic".ljust(25) + "Define the topic to consume from" + print "" + print "Optional Args:" + print " -h, --help".ljust(25) + "Print this help" + print " -g, --group_id".ljust(25) + "Specify the consumer Group ID" + print " -e, --earliest".ljust(25) + "Set consume topic offset to 'earliest' [default: 'latest']" + print " -H, --host".ljust(25) + "Define Kafka broker host [default: '127.0.0.1:9092']" + print " -p, --print".ljust(25) + "Print data to stdout" + print " -n, --num".ljust(25) + "Number of rows to print to stdout [default: 0, ie. forever]" + print " -T, --produce-topic".ljust(25) + "Define a topic to produce to" + print " -u, --url".ljust(25) + "Define a URL to HTTP POST data to" + print " -a, --to-json-array".ljust(25) + "Convert list of newline-separated JSON objects in a JSON array" + print " -s, --stats-interval".ljust(25) + "Define a time interval, in secs, to get statistics to stdout" + + +def post_to_url(http_req, value): + try: + urllib2.urlopen(http_req, value) + except urllib2.HTTPError, err: + print "WARN: urlopen() returned HTTP error code:", err.code + sys.stdout.flush() + except urllib2.URLError, err: + print "WARN: urlopen() returned URL error reason:", err.reason + sys.stdout.flush() + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], "ht:T:pin:g:H:d:eu:as:r:", ["help", "topic=", + "group_id=", "host=", "earliest=", "url=", "produce-topic=", "print=", + "num=", "to-json-array=", "stats-interval="]) + except getopt.GetoptError as err: + # print help information and exit: + print str(err) # will print something like "option -a not recognized" + usage(sys.argv[0]) + sys.exit(2) + + mypid = os.getpid() + kafka_topic = None + kafka_group_id = uuid.uuid1() + kafka_host = "127.0.0.1:9092" + kafka_produce_topic = None + topic_offset = "latest" + http_url_post = None + print_stdout = 0 + print_stdout_num = 0 + print_stdout_max = 0 + convert_to_json_array = 0 + stats_interval = 0 + + required_cl = 0 + + for o, a in opts: + if o in ("-h", "--help"): + usage(sys.argv[0]) + sys.exit() + elif o in ("-t", "--topic"): + required_cl += 1 + kafka_topic = a + elif o in ("-T", "--produce-topic"): + kafka_produce_topic = a + elif o in ("-p", "--print"): + print_stdout = 1 + elif o in ("-n", "--num"): + print_stdout_max = int(a) + elif o in ("-g", "--group_id"): + kafka_group_id = a 
+ elif o in ("-H", "--host"): + kafka_host = a + elif o in ("-e", "--earliest"): + topic_offset = "earliest" + elif o in ("-u", "--url"): + http_url_post = a + elif o in ("-a", "--to-json-array"): + convert_to_json_array = 1 + elif o in ("-s", "--stats-interval"): + stats_interval = int(a) + if stats_interval < 0: + sys.stderr.write("ERROR: `-s`, `--stats-interval` must be positive\n") + sys.exit(1) + else: + assert False, "unhandled option" + + if required_cl < 1: + print "ERROR: Missing required arguments" + usage(sys.argv[0]) + sys.exit(1) + + consumer_conf = { 'bootstrap.servers': kafka_host, + 'group.id': kafka_group_id, + 'default.topic.config': { + 'auto.offset.reset': topic_offset + } + } + + consumer = confluent_kafka.Consumer(**consumer_conf) + consumer.subscribe([kafka_topic]) + + producer_conf = { 'bootstrap.servers': kafka_host } + if kafka_produce_topic: + producer = confluent_kafka.Producer(**producer_conf) + + if stats_interval: + elem_count = 0 + time_count = int(time.time()) + + while True: + message = consumer.poll() + value = message.value().decode('utf-8') + + # + # XXX: data enrichments, manipulations, correlations, etc. go here + # + + if stats_interval: + time_now = int(time.time()) + + if len(value): + if stats_interval: + elem_count += 1 + + if convert_to_json_array: + value = "[" + value + "]" + value = value.replace('\n', ',\n') + value = value.replace(',\n]', ']') + + if print_stdout: + print("%s:%d:%d: pid=%d key=%s value=%s" % (message.topic(), message.partition(), + message.offset(), mypid, str(message.key()), value)) + sys.stdout.flush() + print_stdout_num += 1 + if (print_stdout_max == print_stdout_num): + sys.exit(0) + + if http_url_post: + http_req = urllib2.Request(http_url_post) + http_req.add_header('Content-Type', 'application/json') + post_to_url(http_req, value) + + if kafka_produce_topic: + producer.produce(kafka_produce_topic, value) + producer.poll(0) + + if stats_interval: + if time_now >= (time_count + stats_interval): + print("INFO: stats: [ time=%s interval=%d records=%d pid=%d ]" % + (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_now)), stats_interval, elem_count), mypid) + sys.stdout.flush() + time_count = time_now + elem_count = 0 + +if __name__ == "__main__": + main() diff -Nru pmacct-1.6.1/examples/kafka/kafka_consumer.py pmacct-1.7.0/examples/kafka/kafka_consumer.py --- pmacct-1.6.1/examples/kafka/kafka_consumer.py 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/examples/kafka/kafka_consumer.py 2017-10-20 16:56:19.000000000 +0000 @@ -1,19 +1,31 @@ #!/usr/bin/env python # -# If missing 'kafka' read how to download it at: +# It is recommended to run the Kafka Python module against Python 2.7+. The +# module is available at: # http://kafka-python.readthedocs.org/ # +# UltraJSON, an ultra fast JSON encoder and decoder, is available at: +# https://pypi.python.org/pypi/ujson +# +# The Apache Avro Python module is available at: +# https://avro.apache.org/docs/1.8.1/gettingstartedpython.html +# # Binding to the topic specified by kafka_topic (by default 'acct') allows to # receive messages published by a 'kafka' plugin, in JSON format. Similarly for # BGP daemon bgp_*_topic and BMP daemon bmp_*_topic. # -# Binding to the topic specified by plugin_pipe_kafka_topic (by default -# 'core_proc_name-$plugin_name-$plugin_type') allows to receive a copy of -# messages produced by the Core Process to a specific plugin; the messages are -# in binary format, first quad being the sequence number. 
- -import sys, os, getopt, StringIO -from kafka import KafkaConsumer +# Three pipelines are supported in this script: +# * Kafka -> Kafka +# * Kafka -> REST API +# * Kafka -> stdout +# +# Two data encoding formats are supported in this script: +# * JSON +# * Apache Avro + +import sys, os, getopt, StringIO, time, urllib2 +from kafka import KafkaConsumer, KafkaProducer +import ujson as json try: import avro.io @@ -31,29 +43,57 @@ print "" print "Mandatory Args:" - print " -t, --topic".ljust(25) + "Define the topic to use" - print " -g, --group_id".ljust(25) + "Specify the Group ID to declare" + print " -t, --topic".ljust(25) + "Define the topic to consume from" print "" print "Optional Args:" print " -h, --help".ljust(25) + "Print this help" + print " -g, --group_id".ljust(25) + "Specify the consumer Group ID" + print " -e, --earliest".ljust(25) + "Set consume topic offset to 'earliest' [default: 'latest']" print " -H, --host".ljust(25) + "Define Kafka broker host [default: '127.0.0.1:9092']" + print " -p, --print".ljust(25) + "Print data to stdout" + print " -n, --num".ljust(25) + "Number of rows to print to stdout [default: 0, ie. forever]" + print " -T, --produce-topic".ljust(25) + "Define a topic to produce to" + print " -u, --url".ljust(25) + "Define a URL to HTTP POST data to" + print " -a, --to-json-array".ljust(25) + "Convert list of newline-separated JSON objects in a JSON array" + print " -s, --stats-interval".ljust(25) + "Define a time interval, in secs, to get statistics to stdout" if avro_available: print " -d, --decode-with-avro".ljust(25) + "Define the file with the " \ "schema to use for decoding Avro messages" +def post_to_url(http_req, value): + try: + urllib2.urlopen(http_req, value) + except urllib2.HTTPError, err: + print "WARN: urlopen() returned HTTP error code:", err.code + sys.stdout.flush() + except urllib2.URLError, err: + print "WARN: urlopen() returned URL error reason:", err.reason + sys.stdout.flush() + def main(): try: - opts, args = getopt.getopt(sys.argv[1:], "ht:g:H:d:", ["help", "topic=", - "group_id=", "host=", "decode-with-avro="]) + opts, args = getopt.getopt(sys.argv[1:], "ht:T:pn:g:H:d:eu:as:", ["help", "topic=", + "group_id=", "host=", "decode-with-avro=", "earliest=", "url=", + "produce-topic=", "print=", "num=", "to-json-array=", + "stats-interval="]) except getopt.GetoptError as err: # print help information and exit: print str(err) # will print something like "option -a not recognized" usage(sys.argv[0]) sys.exit(2) + mypid = os.getpid() kafka_topic = None kafka_group_id = None kafka_host = "127.0.0.1:9092" + kafka_produce_topic = None + topic_offset = "latest" + http_url_post = None + print_stdout = 0 + print_stdout_num = 0 + print_stdout_max = 0 + convert_to_json_array = 0 + stats_interval = 0 required_cl = 0 @@ -64,15 +104,30 @@ elif o in ("-t", "--topic"): required_cl += 1 kafka_topic = a + elif o in ("-T", "--produce-topic"): + kafka_produce_topic = a + elif o in ("-p", "--print"): + print_stdout = 1 + elif o in ("-n", "--num"): + print_stdout_max = int(a) elif o in ("-g", "--group_id"): - required_cl += 1 kafka_group_id = a elif o in ("-H", "--host"): kafka_host = a + elif o in ("-e", "--earliest"): + topic_offset = "earliest" + elif o in ("-u", "--url"): + http_url_post = a + elif o in ("-a", "--to-json-array"): + convert_to_json_array = 1 + elif o in ("-s", "--stats-interval"): + stats_interval = int(a) + if stats_interval < 0: + sys.stderr.write("ERROR: `--stats-interval` must be positive\n") + sys.exit(1) elif o in ("-d", 
"--decode-with-avro"): if not avro_available: - sys.stderr.write("ERROR: `--decode-with-avro` given but Avro package was " - "not found\n") + sys.stderr.write("ERROR: `--decode-with-avro` given but Avro package was not found\n") sys.exit(1) if not os.path.isfile(a): @@ -86,27 +141,85 @@ else: assert False, "unhandled option" - if (required_cl < 2): + if required_cl < 1: print "ERROR: Missing required arguments" usage(sys.argv[0]) sys.exit(1) - consumer = KafkaConsumer(kafka_topic, group_id=kafka_group_id, bootstrap_servers=[kafka_host]) + consumer = KafkaConsumer(kafka_topic, group_id=kafka_group_id, bootstrap_servers=[kafka_host], auto_offset_reset=topic_offset) + + if kafka_produce_topic: + producer = KafkaProducer(bootstrap_servers=[kafka_host]) + + if stats_interval: + elem_count = 0 + time_count = int(time.time()) for message in consumer: + value = message.value + + # + # XXX: data enrichments, manipulations, correlations, etc. go here + # + + if stats_interval: + time_now = int(time.time()) + if avro_schema: inputio = StringIO.StringIO(message.value) decoder = avro.io.BinaryDecoder(inputio) datum_reader = avro.io.DatumReader(avro_schema) + avro_data = [] while inputio.tell() < len(inputio.getvalue()): x = datum_reader.read(decoder) avro_data.append(str(x)) - print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition, - message.offset, message.key, (",".join(avro_data)))) + + if stats_interval: + elem_count += len(avro_data) + + if print_stdout: + print("%s:%d:%d: pid=%d key=%s value=%s" % (message.topic, message.partition, + message.offset, mypid, message.key, (",\n".join(avro_data)))) + sys.stdout.flush() + print_stdout_num += 1 + if (print_stdout_max == print_stdout_num): + sys.exit(0) + + if http_url_post: + http_req = urllib2.Request(http_url_post) + http_req.add_header('Content-Type', 'application/json') + post_to_url(http_req, ("\n".join(avro_data))) else: - print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition, - message.offset, message.key, message.value)) + if stats_interval: + elem_count += value.count('\n') + elem_count += 1 + + if convert_to_json_array: + value = "[" + value + "]" + value = value.replace('\n', ',\n') + value = value.replace(',\n]', ']') + + if print_stdout: + print("%s:%d:%d: pid=%d key=%s value=%s" % (message.topic, message.partition, + message.offset, mypid, message.key, value)) + sys.stdout.flush() + + if http_url_post: + http_req = urllib2.Request(http_url_post) + http_req.add_header('Content-Type', 'application/json') + post_to_url(http_req, value) + + if kafka_produce_topic: + producer.send(kafka_produce_topic, value) + + if stats_interval: + if time_now >= (time_count + stats_interval): + print("INFO: stats: [ time=%s interval=%d records=%d pid=%d ]" % + (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_now)), stats_interval, elem_count), mypid) + sys.stdout.flush() + time_count = time_now + elem_count = 0 if __name__ == "__main__": main() diff -Nru pmacct-1.6.1/examples/networks.lst.example pmacct-1.7.0/examples/networks.lst.example --- pmacct-1.6.1/examples/networks.lst.example 2016-10-01 22:48:58.000000000 +0000 +++ pmacct-1.7.0/examples/networks.lst.example 2017-10-20 16:56:19.000000000 +0000 @@ -1,7 +1,7 @@ ! ! Sample networks-list; enabled by 'networks_file' key. ! -! Format supported: [