diff -Nru lttnganalyses-0.3.0/ChangeLog lttnganalyses-0.4.3/ChangeLog --- lttnganalyses-0.3.0/ChangeLog 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/ChangeLog 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,183 @@ +2016-03-07 LTTng analyses 0.4.3 + * Tests fixes (timezone issues) + +2016-03-01 LTTng analyses 0.4.2 + * Packaging fixes + +2016-02-29 LTTng analyses 0.4.1 + * Packaging fixes + +2016-02-26 LTTng analyses 0.4.0 + * Scheduler latency analyses + * Priority fields in CPU and latency analyses + * Machine Interface (json) output + * Period-based analyses (begin and end events) + * Refactoring/Bugfixes/Cleanup + * Basic testing infrastructure + +2015-07-13 LTTng analyses 0.3.0 + * Merge pull request #23 from mjeanson/master + * Convert README to reStructuredText + * Fix pep8 errors + * Refactor in a single package with subpackages + * fix: stats with 0 requests + * Check for babeltrace python package on installation + * Define version once per package only + * Add ChangeLog file + +2015-04-20 LTTng analyses 0.2.0 + * Merge pull request #22 from abusque/refactor-syscallstats + * Bump version to 0.2 + * Refactor syscallstats script to use new analysis backend + * Rename min/max attributes to avoid collision with built-ins + * Merge pull request #21 from abusque/decouple-io + * Implement check for --end argument before start of trace + * Style: fix indentation in _get_io_requests + * Fix: set pid correctly on FileStats init + * Fix typo in _fix_context_pid + * Fix: use TID instead of PID in file stats if PID is None + * Refactor io latency freq output + * Lint: remove unused import, fix 'dangerous' default args + * Refactor io top and log views + * Remove deprecated --extra argument + * Fix: correct typo and existence test in fd getter + * Fix: correct typo in ns_to_hour_nsec output + * Style: fix pylint/pep8 style issues + * Replace map() by list comprehension in disk latency stats + * Refactor IO Latency stats output methods + * Add generators to 
iterate over io requests + * Add method to compare equivalent io operations + * Fix: properly handle empty filters for IO file stats + * Fix FileStats reset() function + * Move _filter_process method to base command class + * Make _arg_pid_list list of ints instead of strings + * Refactor iotop per file analysis and output + * Refactor iotop output methods + * Add _print_ascii_graph method to simplify output of graphs + * Rename filter predicates to indicate visibility + * Remove deprecated breakcb in IO command + * Remove unused _compute_stats method from commands + * Rename IO command for consistency with other commands + * Track FDs chronologically in IO analysis + * Add timestamp to create/close FD notifications + * Remove dead code from IO cli + * Reset FD in IO Analysis + * Add support for pwrite* and pread* I/O syscalls + * Implement syscall I/O analysis + * Move returned_size attribute from SyscallIORequest into ReadWriteIORequest + * Send create process and fd notification on statedump events + * Send fd create and close notifications on sched events + * Fix: send create_fd notification for open io requests + * Add OP_READ_WRITE IO operation type for syscalls which both read and write + * Use a single method to track io request exits + * Refactor/rewrite IO state provider + * Refactor syscall analysis to use new SyscallEvent class + * Refactor NetStateProvider to use new SyscallEvent and io rq objects + * Refactor MemStateProvider to use new SyscallEvent and io rq objects + * Remove pending_syscalls array from State class + * Refactor statedump provider to track only state and not analysis related attributes + * Don't set deprecated parent_pid on FD object + * Use SyscallEvent objects in syscall state provider + * Remove Syscalls_stats class + * Remove analysis related attributes from FD class, add factory to create from open rq + * Add get_fd_type method to retrieve fd type from syscall name + * Add more IORequest classes, and io_rq attr to SyscallEvent + 
* Set SyscallEvent name using get_syscall_name method + * Remove analysis related attributes from Process state class + * Add more dup open syscalls, remove generic filenames from SyscallConsts + * Fix get_syscall_name string indexing + * Move IO syscalls handling into separate provider + * Strip prefixes from syscall names for brevity + * Merge branch 'master' into decouple-io + * Merge pull request #20 from abusque/linting + * Rename state to _state in providers for consistency + * Rename irq start/stop timestamps to begin/end for consistency + * Refactor IO Requests mechanism and (block I/O) analysis + * Track network usage in IO analysis + * Separate syscalls and io analyses + * Use del instead of pop when possible with fds and remove unused attributes + * Move date args processing to command, more linting + * Linting: rename p* to pattern + * Linting of common.py and related code + * Fix: make the regex strings raw strings + * fix for unknown pid in io.py + * Fix syscallstats command description method names + * Add IO analysis separate from syscalls + * Merge pull request #19 from jdesfossez/dev + * Fix: process the sched_switch for the swapper + * Fix: handle the case of missing PID + * Merge pull request #18 from abusque/decouple-cputop + * Revert accidental partial commit of syscalls.py + * Fix: remove deprecated last_sched attribute from Process class + * Fix: remove deprecated cpu_ns attribute from Process class + * Refactor cputop cli to work with new analysis module + * Implement cputop analysis module + * Fix: assign boolean instead of integer values for CLOEXEC + * Add class method to duplicate FD objects + * Remove non-state related attributes from process and cpu classes + * Refactor sched state provider to track current state only + * Remove deprecated perf context tracking in sched + * Fix: set cloexec on fd from flags on statedump + * remove old code (pre 0.1) that was kept as reference for the refactoring + * Merge pull request #17 from 
abusque/decouple-memtop + * Minor: fix pep8 style issues + * Decouple mem analysis from current state + * Rename notification callback methods to reflect public accessibility + * Add print date method to base command class + * Add reset method to Analysis classes + * Merge pull request #16 from abusque/decouple-modules + * Style: correct pep8 errors + * Fix: set cpu id in constructor + * Minor: add comment in irq state provider to clarify execptional softirq creation + * Style: rename method in memtop for consistency + * Fix tracking of softirq_raises and corresponding entries + * Fix: don't print raise_ts multiple times in irq log + * Simplify irq cli args transform + * Refactor IrqAnalysisCommand to work with rewritten analysis + * Add reset method to IrqStats + * Keep irq list by id and count irq raises + * Simplify filter_irq function in CLI + * Track CPU id in interrupt objects + * Rename irq analysis cli module to IrqAnalysisCommand to avoid ambiguity + * Implement filtering by duration for IrqAnalysis + * Update copyright info for modified files + * Implement initial IrqStats system for analysis + * fix: title + * new tool to filter a trace based on TID/Procname with follow-child support + * Style: replace double quotes by single quotes in lttnganalysescli + * Style: replace double quotes by single quotes in lttnganalyses + * Style: replace double quotes by single quotes in linuxautomaton + * Implement notification for communication from automaton to analyses + * Remove superfluous clear_screen string in irq _print_stats + * Refactor IRQ state provider and related classes + * Remove unused final argument in _print_results in cli + * Fix: don't count freed pages twice in memtop, reorganize printing code + * Fix: display unkwown for pname/pid in block read/write when we don't have the info + * Fix: check that current_tid is not None instead of -1 + * Initialize self.state in Command module when creating automaton + * Pythonify tests for empty or uninitialized 
structures and arguments + * Use None instead of -1 or 0 for default argument values + * Add callback registration to analysis module + * Replace usage of -1 as default/invalid value by None + * Clean-up mem and sched state providers and related modules. + * Replace integer logic by boolean value + * fix: missing sync in i/o syscalls list + * handle sys_accept4 + * Merge pull request #15 from abusque/deduplication + * Clean-up: dead code removal in linuxautomaton modules + * Remove deprecated ret_strings from syscalls.py + * Merge pull request #14 from abusque/email-fix + * Fix: correct typo in author email address + * Remove redundant IOCategory code + * Merge pull request #13 from abusque/chrono_fds + * Move track chrono fd code into method of Process class + * Track files from statedump in chrono_fds + * Fix: use event.timestamp instead of event[timestamp_begin] + * Track files opened before start of trace in chrono_fds + * Track chronological fd metadata + * fix override syscall name + * test override syscall name for epoll_ctl + * show tid value + * fix: handle unknown syscall return codes + * fix: handle unknown syscall return codes + * don't fail if some events are not available diff -Nru lttnganalyses-0.3.0/debian/changelog lttnganalyses-0.4.3/debian/changelog --- lttnganalyses-0.3.0/debian/changelog 2016-02-16 14:44:11.000000000 +0000 +++ lttnganalyses-0.4.3/debian/changelog 2016-03-08 15:29:30.000000000 +0000 @@ -1,8 +1,25 @@ -lttnganalyses (0.3.0-2build1) xenial; urgency=medium +lttnganalyses (0.4.3-1) unstable; urgency=medium - * No change rebuild to drop python3.4 support. 
+ * [c9f34a4] New upstream version 0.4.3 - -- Dimitri John Ledkov Tue, 16 Feb 2016 14:44:11 +0000 + -- Michael Jeanson Tue, 08 Mar 2016 10:28:30 -0500 + +lttnganalyses (0.4.2-2) unstable; urgency=medium + + * [738a60e] Added homepage to control + * [7daee63] Reworked copyright file + + -- Michael Jeanson Wed, 02 Mar 2016 12:19:09 -0500 + +lttnganalyses (0.4.2-1) unstable; urgency=medium + + * [346d503] Added Jon Bernard to uploaders + * [de75a88] New upstream version 0.4.2 + * [675cf02] Update manpages for 0.4.2 + * [3acef35] Bump standards version to 3.9.7, no changes necessary + * [319b479] Update vcs fields to use secure uri + + -- Michael Jeanson Tue, 01 Mar 2016 11:55:33 -0500 lttnganalyses (0.3.0-2) unstable; urgency=medium diff -Nru lttnganalyses-0.3.0/debian/control lttnganalyses-0.4.3/debian/control --- lttnganalyses-0.3.0/debian/control 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/control 2016-03-08 15:29:30.000000000 +0000 @@ -1,13 +1,15 @@ Source: lttnganalyses Maintainer: Michael Jeanson +Uploaders: Jon Bernard Section: python Priority: optional Build-Depends: python3-setuptools, python3-all (>= 3.4), debhelper (>= 9), python3-babeltrace (>= 1.2) -Standards-Version: 3.9.6 +Standards-Version: 3.9.7 X-Python3-Version: >= 3.4 -Vcs-Git: git://anonscm.debian.org/collab-maint/lttnganalyses.git -Vcs-Browser: http://anonscm.debian.org/cgit/collab-maint/lttnganalyses.git +Vcs-Git: https://anonscm.debian.org/git/collab-maint/lttnganalyses.git +Vcs-Browser: https://anonscm.debian.org/cgit/collab-maint/lttnganalyses.git +Homepage: https://github.com/lttng/lttng-analyses Package: python3-lttnganalyses Architecture: all diff -Nru lttnganalyses-0.3.0/debian/copyright lttnganalyses-0.4.3/debian/copyright --- lttnganalyses-0.3.0/debian/copyright 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/copyright 2016-03-08 15:29:30.000000000 +0000 @@ -1,18 +1,81 @@ -Format: http://dep.debian.net/deps/dep5 +Format: 
http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: lttng-analyses Source: https://github.com/lttng/lttng-analyses Files: * Copyright: 2015 Julien Desfossez +License: MIT + +Files: debian/* +Copyright: 2015, 2016 Michael Jeanson +License: MIT + +Files: lttng-schedfreq +Copyright: 2015 Antoine Busque +License: MIT + +Files: lttnganalyses.egg-info/* +Copyright: 2015, 2016 Julien Desfossez + 2015, 2016 Antoine Busque +License: MIT + +Files: lttnganalyses/* +Copyright: 2015, 2016 Julien Desfossez + 2015, 2016 Antoine Busque +License: MIT + +Files: lttnganalyses/cli/* +Copyright: 2015 Philippe Proulx + 2015 Julien Desfossez 2015 Antoine Busque License: MIT -Files: lttnganalyses/ascii_graph/__init__.py -Copyright: 2012 Pierre-Francois Carpentier +Files: lttnganalyses/cli/mi.py +Copyright: 2015 Philippe Proulx License: MIT -Files: debian/* -Copyright: 2015 Michael Jeanson +Files: lttnganalyses/cli/sched.py +Copyright: 2015 Julien Desfossez + 2015 Antoine Busque +License: MIT + +Files: lttnganalyses/cli/termgraph.py +Copyright: 2016 Antoine Busque +License: MIT + +Files: lttnganalyses/common/* +Copyright: 2015 Antoine Busque +License: MIT + +Files: lttnganalyses/common/format_utils.py +Copyright: 2016 Antoine Busque +License: MIT + +Files: lttnganalyses/core/* +Copyright: 2015 Antoine Busque +License: MIT + +Files: lttnganalyses/core/cputop.py + lttnganalyses/core/sched.py +Copyright: 2015 Julien Desfossez + 2015 Antoine Busque +License: MIT + +Files: lttnganalyses/linuxautomaton/* +Copyright: 2015 Julien Desfossez + 2015 Antoine Busque +License: MIT + +Files: setup.py +Copyright: 2015 Michael Jeanson +License: MIT + +Files: tests/* +Copyright: 2016 Julien Desfossez +License: MIT + +Files: tests/__init__.py +Copyright: 2016 Antoine Busque License: MIT License: MIT diff -Nru lttnganalyses-0.3.0/debian/man/lttng-analyses-record.1 lttnganalyses-0.4.3/debian/man/lttng-analyses-record.1 --- lttnganalyses-0.3.0/debian/man/lttng-analyses-record.1 
2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-analyses-record.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,5 +1,5 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-ANALYSES-RECORD "1" "July 2015" "lttng-analyses-record 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-ANALYSES-RECORD "1" "March 2016" "lttng-analyses-record 0.4.2" "User Commands" .SH NAME lttng-analyses-record \- LTTng analyses trace recording helper .SH DESCRIPTION diff -Nru lttnganalyses-0.3.0/debian/man/lttng-cputop.1 lttnganalyses-0.4.3/debian/man/lttng-cputop.1 --- lttnganalyses-0.3.0/debian/man/lttng-cputop.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-cputop.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,13 +1,17 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-CPUTOP "1" "July 2015" "lttng-cputop 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-CPUTOP "1" "March 2016" "lttng-cputop 0.4.2" "User Commands" .SH NAME lttng-cputop \- LTTng analyses CPU top .SH DESCRIPTION -usage: lttng\-cputop [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-cputop [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] [\-\-end END] -[\-\-timerange TIMERANGE] [\-\-procname PROCNAME] [\-\-pid PID] -[\-V] +[\-\-begin BEGIN] [\-\-end END] [\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-limit LIMIT] [\-\-top] .PP The cputop command. 
@@ -21,35 +25,62 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP \fB\-\-procname\fR PROCNAME Filter the results only for this list of process names .TP -\fB\-\-pid\fR PID -Filter the results only for this list of PIDs +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs .TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top results diff -Nru lttnganalyses-0.3.0/debian/man/lttng-cputop-mi.1 lttnganalyses-0.4.3/debian/man/lttng-cputop-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-cputop-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-cputop-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,87 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-CPUTOP-MI "1" "March 2016" "lttng-cputop-mi 0.4.2" "User Commands" +.SH NAME +lttng-cputop-mi \- lttng-cputop-mi +.SH DESCRIPTION +usage: lttng\-cputop\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] [\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-limit LIMIT] +[\-\-top] +[ [ ...]] +.PP +The cputop command. 
+.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top results diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iolatencyfreq.1 lttnganalyses-0.4.3/debian/man/lttng-iolatencyfreq.1 --- lttnganalyses-0.3.0/debian/man/lttng-iolatencyfreq.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iolatencyfreq.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,17 +1,23 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-IOLATENCYFREQ "1" "July 2015" "lttng-iolatencyfreq 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IOLATENCYFREQ "1" "March 2016" "lttng-iolatencyfreq 0.4.2" "User Commands" .SH NAME lttng-iolatencyfreq \- LTTng analyses IO latency frequency .SH DESCRIPTION -usage: lttng\-iolatencyfreq [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-iolatencyfreq [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] -[\-\-end END] [\-\-timerange TIMERANGE] -[\-\-procname PROCNAME] [\-\-pid PID] [\-\-max MAX] -[\-\-min MIN] [\-\-maxsize MAXSIZE] [\-\-minsize MINSIZE] -[\-\-log] [\-\-usage] [\-\-latencystats] [\-\-latencytop] -[\-\-latencyfreq] [\-\-freq\-resolution FREQ_RESOLUTION] -[\-V] +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-min MIN] +[\-\-max MAX] [\-\-log] [\-\-limit LIMIT] [\-\-top] +[\-\-stats] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-usage] +[\-\-minsize MINSIZE] [\-\-maxsize MAXSIZE] .PP The I/O command. 
@@ -25,67 +31,98 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP \fB\-\-procname\fR PROCNAME Filter the results only for this list of process names .TP -\fB\-\-pid\fR PID -Filter the results only for this list of PIDs -.TP -\fB\-\-max\fR MAX -Filter out, duration longer than max usec +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs .TP \fB\-\-min\fR MIN -Filter out, duration shorter than min usec +Filter out durations shorter than min usec .TP -\fB\-\-maxsize\fR MAXSIZE -Filter out, I/O operations working with more that -maxsize bytes -.TP -\fB\-\-minsize\fR MINSIZE -Filter out, I/O operations working with less that -minsize bytes +\fB\-\-max\fR MAX +Filter out durations longer than max usec .TP \fB\-\-log\fR -Display the events in the order they appeared +Output the I/O requests in chronological order .TP -\fB\-\-usage\fR -Show the I/O usage +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) .TP -\fB\-\-latencystats\fR -Show the I/O latency statistics +\fB\-\-top\fR +Output the top I/O latencies by category .TP -\fB\-\-latencytop\fR -Show the I/O latency top +\fB\-\-stats\fR +Output the I/O latency statistics .TP -\fB\-\-latencyfreq\fR -Show the I/O latency frequency distribution +\fB\-\-freq\fR +Output the I/O latency frequency distribution .TP \fB\-\-freq\-resolution\fR FREQ_RESOLUTION Frequency distribution resolution (default 20) .TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE 
+Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iolatencyfreq-mi.1 lttnganalyses-0.4.3/debian/man/lttng-iolatencyfreq-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-iolatencyfreq-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iolatencyfreq-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,128 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-IOLATENCYFREQ-MI "1" "March 2016" "lttng-iolatencyfreq-mi 0.4.2" "User Commands" +.SH NAME +lttng-iolatencyfreq-mi \- lttng-iolatencyfreq-mi +.SH DESCRIPTION +usage: lttng\-iolatencyfreq\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] +[\-\-cpu CPU] [\-\-timerange TIMERANGE] [\-V] +[\-\-metadata] [\-\-procname PROCNAME] [\-\-tid TID] +[\-\-min MIN] [\-\-max MAX] [\-\-log] [\-\-limit LIMIT] +[\-\-top] [\-\-stats] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-usage] +[\-\-minsize MINSIZE] [\-\-maxsize MAXSIZE] +[ [ ...]] +.PP +The I/O command. 
+.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-log\fR +Output the I/O requests in chronological order +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top I/O latencies by category +.TP +\fB\-\-stats\fR +Output the I/O latency statistics +.TP +\fB\-\-freq\fR +Output the I/O latency frequency distribution +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE +Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iolatencystats.1 lttnganalyses-0.4.3/debian/man/lttng-iolatencystats.1 --- lttnganalyses-0.3.0/debian/man/lttng-iolatencystats.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iolatencystats.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,17 +1,23 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-IOLATENCYSTATS "1" "July 2015" "lttng-iolatencystats 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IOLATENCYSTATS "1" "March 2016" "lttng-iolatencystats 0.4.2" "User Commands" .SH NAME lttng-iolatencystats \- LTTng analyses IO latency statistics .SH DESCRIPTION -usage: lttng\-iolatencystats [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-iolatencystats [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] -[\-\-end END] [\-\-timerange TIMERANGE] -[\-\-procname PROCNAME] [\-\-pid PID] [\-\-max MAX] -[\-\-min MIN] [\-\-maxsize MAXSIZE] -[\-\-minsize MINSIZE] [\-\-log] [\-\-usage] -[\-\-latencystats] [\-\-latencytop] [\-\-latencyfreq] -[\-\-freq\-resolution FREQ_RESOLUTION] [\-V] +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-min MIN] +[\-\-max MAX] [\-\-log] [\-\-limit LIMIT] [\-\-top] +[\-\-stats] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-usage] +[\-\-minsize MINSIZE] [\-\-maxsize MAXSIZE] .PP The I/O command. 
@@ -25,67 +31,98 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP \fB\-\-procname\fR PROCNAME Filter the results only for this list of process names .TP -\fB\-\-pid\fR PID -Filter the results only for this list of PIDs -.TP -\fB\-\-max\fR MAX -Filter out, duration longer than max usec +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs .TP \fB\-\-min\fR MIN -Filter out, duration shorter than min usec +Filter out durations shorter than min usec .TP -\fB\-\-maxsize\fR MAXSIZE -Filter out, I/O operations working with more that -maxsize bytes -.TP -\fB\-\-minsize\fR MINSIZE -Filter out, I/O operations working with less that -minsize bytes +\fB\-\-max\fR MAX +Filter out durations longer than max usec .TP \fB\-\-log\fR -Display the events in the order they appeared +Output the I/O requests in chronological order .TP -\fB\-\-usage\fR -Show the I/O usage +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) .TP -\fB\-\-latencystats\fR -Show the I/O latency statistics +\fB\-\-top\fR +Output the top I/O latencies by category .TP -\fB\-\-latencytop\fR -Show the I/O latency top +\fB\-\-stats\fR +Output the I/O latency statistics .TP -\fB\-\-latencyfreq\fR -Show the I/O latency frequency distribution +\fB\-\-freq\fR +Output the I/O latency frequency distribution .TP \fB\-\-freq\-resolution\fR FREQ_RESOLUTION Frequency distribution resolution (default 20) .TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE 
+Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iolatencystats-mi.1 lttnganalyses-0.4.3/debian/man/lttng-iolatencystats-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-iolatencystats-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iolatencystats-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,128 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-IOLATENCYSTATS-MI "1" "March 2016" "lttng-iolatencystats-mi 0.4.2" "User Commands" +.SH NAME +lttng-iolatencystats-mi \- lttng-iolatencystats-mi +.SH DESCRIPTION +usage: lttng\-iolatencystats\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] +[\-\-cpu CPU] [\-\-timerange TIMERANGE] [\-V] +[\-\-metadata] [\-\-procname PROCNAME] [\-\-tid TID] +[\-\-min MIN] [\-\-max MAX] [\-\-log] [\-\-limit LIMIT] +[\-\-top] [\-\-stats] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-usage] +[\-\-minsize MINSIZE] [\-\-maxsize MAXSIZE] +[ [ ...]] +.PP +The I/O command. 
+.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-log\fR +Output the I/O requests in chronological order +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top I/O latencies by category +.TP +\fB\-\-stats\fR +Output the I/O latency statistics +.TP +\fB\-\-freq\fR +Output the I/O latency frequency distribution +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE +Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iolatencytop.1 lttnganalyses-0.4.3/debian/man/lttng-iolatencytop.1 --- lttnganalyses-0.3.0/debian/man/lttng-iolatencytop.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iolatencytop.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,17 +1,23 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-IOLATENCYTOP "1" "July 2015" "lttng-iolatencytop 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IOLATENCYTOP "1" "March 2016" "lttng-iolatencytop 0.4.2" "User Commands" .SH NAME lttng-iolatencytop \- LTTng analyses IO latency top .SH DESCRIPTION -usage: lttng\-iolatencytop [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-iolatencytop [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] -[\-\-end END] [\-\-timerange TIMERANGE] -[\-\-procname PROCNAME] [\-\-pid PID] [\-\-max MAX] -[\-\-min MIN] [\-\-maxsize MAXSIZE] [\-\-minsize MINSIZE] -[\-\-log] [\-\-usage] [\-\-latencystats] [\-\-latencytop] -[\-\-latencyfreq] [\-\-freq\-resolution FREQ_RESOLUTION] -[\-V] +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-min MIN] +[\-\-max MAX] [\-\-log] [\-\-limit LIMIT] [\-\-top] +[\-\-stats] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-usage] [\-\-minsize MINSIZE] +[\-\-maxsize MAXSIZE] .PP The I/O command. 
@@ -25,67 +31,98 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP \fB\-\-procname\fR PROCNAME Filter the results only for this list of process names .TP -\fB\-\-pid\fR PID -Filter the results only for this list of PIDs -.TP -\fB\-\-max\fR MAX -Filter out, duration longer than max usec +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs .TP \fB\-\-min\fR MIN -Filter out, duration shorter than min usec +Filter out durations shorter than min usec .TP -\fB\-\-maxsize\fR MAXSIZE -Filter out, I/O operations working with more that -maxsize bytes -.TP -\fB\-\-minsize\fR MINSIZE -Filter out, I/O operations working with less that -minsize bytes +\fB\-\-max\fR MAX +Filter out durations longer than max usec .TP \fB\-\-log\fR -Display the events in the order they appeared +Output the I/O requests in chronological order .TP -\fB\-\-usage\fR -Show the I/O usage +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) .TP -\fB\-\-latencystats\fR -Show the I/O latency statistics +\fB\-\-top\fR +Output the top I/O latencies by category .TP -\fB\-\-latencytop\fR -Show the I/O latency top +\fB\-\-stats\fR +Output the I/O latency statistics .TP -\fB\-\-latencyfreq\fR -Show the I/O latency frequency distribution +\fB\-\-freq\fR +Output the I/O latency frequency distribution .TP \fB\-\-freq\-resolution\fR FREQ_RESOLUTION Frequency distribution resolution (default 20) .TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE 
+Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iolatencytop-mi.1 lttnganalyses-0.4.3/debian/man/lttng-iolatencytop-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-iolatencytop-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iolatencytop-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,128 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-IOLATENCYTOP-MI "1" "March 2016" "lttng-iolatencytop-mi 0.4.2" "User Commands" +.SH NAME +lttng-iolatencytop-mi \- lttng-iolatencytop-mi +.SH DESCRIPTION +usage: lttng\-iolatencytop\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-min MIN] +[\-\-max MAX] [\-\-log] [\-\-limit LIMIT] [\-\-top] +[\-\-stats] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-usage] +[\-\-minsize MINSIZE] [\-\-maxsize MAXSIZE] +[ [ ...]] +.PP +The I/O command. 
+.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-log\fR +Output the I/O requests in chronological order +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top I/O latencies by category +.TP +\fB\-\-stats\fR +Output the I/O latency statistics +.TP +\fB\-\-freq\fR +Output the I/O latency frequency distribution +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE +Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iolog.1 lttnganalyses-0.4.3/debian/man/lttng-iolog.1 --- lttnganalyses-0.3.0/debian/man/lttng-iolog.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iolog.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,16 +1,21 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-IOLOG "1" "July 2015" "lttng-iolog 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IOLOG "1" "March 2016" "lttng-iolog 0.4.2" "User Commands" .SH NAME lttng-iolog \- LTTng analyses IO log .SH DESCRIPTION -usage: lttng\-iolog [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-iolog [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .IP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] [\-\-end END] -[\-\-timerange TIMERANGE] [\-\-procname PROCNAME] [\-\-pid PID] -[\-\-max MAX] [\-\-min MIN] [\-\-maxsize MAXSIZE] -[\-\-minsize MINSIZE] [\-\-log] [\-\-usage] [\-\-latencystats] -[\-\-latencytop] [\-\-latencyfreq] -[\-\-freq\-resolution FREQ_RESOLUTION] [\-V] +[\-\-begin BEGIN] [\-\-end END] [\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-min MIN] [\-\-max MAX] +[\-\-log] [\-\-limit LIMIT] [\-\-top] [\-\-stats] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-usage] [\-\-minsize MINSIZE] +[\-\-maxsize MAXSIZE] .PP The I/O command. 
@@ -24,67 +29,98 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP \fB\-\-procname\fR PROCNAME Filter the results only for this list of process names .TP -\fB\-\-pid\fR PID -Filter the results only for this list of PIDs -.TP -\fB\-\-max\fR MAX -Filter out, duration longer than max usec +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs .TP \fB\-\-min\fR MIN -Filter out, duration shorter than min usec +Filter out durations shorter than min usec .TP -\fB\-\-maxsize\fR MAXSIZE -Filter out, I/O operations working with more that -maxsize bytes -.TP -\fB\-\-minsize\fR MINSIZE -Filter out, I/O operations working with less that -minsize bytes +\fB\-\-max\fR MAX +Filter out durations longer than max usec .TP \fB\-\-log\fR -Display the events in the order they appeared +Output the I/O requests in chronological order .TP -\fB\-\-usage\fR -Show the I/O usage +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) .TP -\fB\-\-latencystats\fR -Show the I/O latency statistics +\fB\-\-top\fR +Output the top I/O latencies by category .TP -\fB\-\-latencytop\fR -Show the I/O latency top +\fB\-\-stats\fR +Output the I/O latency statistics .TP -\fB\-\-latencyfreq\fR -Show the I/O latency frequency distribution +\fB\-\-freq\fR +Output the I/O latency frequency distribution .TP \fB\-\-freq\-resolution\fR FREQ_RESOLUTION Frequency distribution resolution (default 20) .TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE 
+Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iolog-mi.1 lttnganalyses-0.4.3/debian/man/lttng-iolog-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-iolog-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iolog-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,126 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-IOLOG-MI "1" "March 2016" "lttng-iolog-mi 0.4.2" "User Commands" +.SH NAME +lttng-iolog-mi \- lttng-iolog-mi +.SH DESCRIPTION +usage: lttng\-iolog\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] [\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-min MIN] +[\-\-max MAX] [\-\-log] [\-\-limit LIMIT] [\-\-top] [\-\-stats] +[\-\-freq] [\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-usage] +[\-\-minsize MINSIZE] [\-\-maxsize MAXSIZE] +[ [ ...]] +.PP +The I/O command. 
+.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-log\fR +Output the I/O requests in chronological order +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top I/O latencies by category +.TP +\fB\-\-stats\fR +Output the I/O latency statistics +.TP +\fB\-\-freq\fR +Output the I/O latency frequency distribution +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE +Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iousagetop.1 lttnganalyses-0.4.3/debian/man/lttng-iousagetop.1 --- lttnganalyses-0.3.0/debian/man/lttng-iousagetop.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iousagetop.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,17 +1,22 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-IOUSAGETOP "1" "July 2015" "lttng-iousagetop 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IOUSAGETOP "1" "March 2016" "lttng-iousagetop 0.4.2" "User Commands" .SH NAME lttng-iousagetop \- LTTng analyses IO usage top .SH DESCRIPTION -usage: lttng\-iousagetop [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-iousagetop [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] -[\-\-end END] [\-\-timerange TIMERANGE] -[\-\-procname PROCNAME] [\-\-pid PID] [\-\-max MAX] -[\-\-min MIN] [\-\-maxsize MAXSIZE] [\-\-minsize MINSIZE] -[\-\-log] [\-\-usage] [\-\-latencystats] [\-\-latencytop] -[\-\-latencyfreq] [\-\-freq\-resolution FREQ_RESOLUTION] -[\-V] +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-min MIN] +[\-\-max MAX] [\-\-log] [\-\-limit LIMIT] [\-\-top] [\-\-stats] +[\-\-freq] [\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-usage] +[\-\-minsize MINSIZE] [\-\-maxsize MAXSIZE] .PP The I/O command. 
@@ -25,67 +30,98 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP \fB\-\-procname\fR PROCNAME Filter the results only for this list of process names .TP -\fB\-\-pid\fR PID -Filter the results only for this list of PIDs -.TP -\fB\-\-max\fR MAX -Filter out, duration longer than max usec +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs .TP \fB\-\-min\fR MIN -Filter out, duration shorter than min usec +Filter out durations shorter than min usec .TP -\fB\-\-maxsize\fR MAXSIZE -Filter out, I/O operations working with more that -maxsize bytes -.TP -\fB\-\-minsize\fR MINSIZE -Filter out, I/O operations working with less that -minsize bytes +\fB\-\-max\fR MAX +Filter out durations longer than max usec .TP \fB\-\-log\fR -Display the events in the order they appeared +Output the I/O requests in chronological order .TP -\fB\-\-usage\fR -Show the I/O usage +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) .TP -\fB\-\-latencystats\fR -Show the I/O latency statistics +\fB\-\-top\fR +Output the top I/O latencies by category .TP -\fB\-\-latencytop\fR -Show the I/O latency top +\fB\-\-stats\fR +Output the I/O latency statistics .TP -\fB\-\-latencyfreq\fR -Show the I/O latency frequency distribution +\fB\-\-freq\fR +Output the I/O latency frequency distribution .TP \fB\-\-freq\-resolution\fR FREQ_RESOLUTION Frequency distribution resolution (default 20) .TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE 
+Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-iousagetop-mi.1 lttnganalyses-0.4.3/debian/man/lttng-iousagetop-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-iousagetop-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-iousagetop-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,128 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-IOUSAGETOP-MI "1" "March 2016" "lttng-iousagetop-mi 0.4.2" "User Commands" +.SH NAME +lttng-iousagetop-mi \- lttng-iousagetop-mi +.SH DESCRIPTION +usage: lttng\-iousagetop\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-min MIN] +[\-\-max MAX] [\-\-log] [\-\-limit LIMIT] [\-\-top] +[\-\-stats] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-usage] +[\-\-minsize MINSIZE] [\-\-maxsize MAXSIZE] +[ [ ...]] +.PP +The I/O command. 
+.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-log\fR +Output the I/O requests in chronological order +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top I/O latencies by category +.TP +\fB\-\-stats\fR +Output the I/O latency statistics +.TP +\fB\-\-freq\fR +Output the I/O latency frequency distribution +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-usage\fR +Output the I/O usage +.TP +\fB\-\-minsize\fR MINSIZE +Filter out, I/O operations working with less that +minsize bytes +.TP +\fB\-\-maxsize\fR MAXSIZE +Filter out, I/O operations working with more that +maxsize bytes diff -Nru lttnganalyses-0.3.0/debian/man/lttng-irqfreq.1 lttnganalyses-0.4.3/debian/man/lttng-irqfreq.1 --- lttnganalyses-0.3.0/debian/man/lttng-irqfreq.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-irqfreq.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,14 +1,19 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-IRQFREQ "1" "July 2015" "lttng-irqfreq 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IRQFREQ "1" "March 2016" "lttng-irqfreq 0.4.2" "User Commands" .SH NAME lttng-irqfreq \- LTTng analyses IRQ frequency .SH DESCRIPTION -usage: lttng\-irqfreq [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-irqfreq [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] [\-\-end END] -[\-\-timerange TIMERANGE] [\-\-max MAX] [\-\-min MIN] [\-\-freq] -[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-log] [\-\-stats] -[\-\-irq IRQ] [\-\-softirq SOFTIRQ] [\-V] +[\-\-begin BEGIN] [\-\-end END] [\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] [\-\-min MIN] +[\-\-max MAX] [\-\-freq] [\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-log] [\-\-stats] +[\-\-irq IRQ] [\-\-softirq SOFTIRQ] .PP The irq command. 
@@ -22,53 +27,81 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP -\fB\-\-max\fR MAX -Filter out, duration longer than max usec +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar .TP \fB\-\-min\fR MIN -Filter out, duration shorter than min usec +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec .TP \fB\-\-freq\fR -Show the frequency distribution of handler duration +Output the frequency distribution of handler durations .TP \fB\-\-freq\-resolution\fR FREQ_RESOLUTION Frequency distribution resolution (default 20) .TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP \fB\-\-log\fR -Display the events in the order they appeared +Output the IRQs in chronological order .TP \fB\-\-stats\fR -Display the statistics +Output IRQ statistics .TP \fB\-\-irq\fR IRQ -Show results only for the list of IRQ +Output results only for the list of IRQ .TP \fB\-\-softirq\fR SOFTIRQ -Show results only for the list of SoftIRQ -.TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +Output results only for the list of SoftIRQ diff -Nru lttnganalyses-0.3.0/debian/man/lttng-irqfreq-mi.1 lttnganalyses-0.4.3/debian/man/lttng-irqfreq-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-irqfreq-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-irqfreq-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,109 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IRQFREQ-MI "1" "March 2016" "lttng-irqfreq-mi 0.4.2" "User Commands" +.SH NAME +lttng-irqfreq-mi \- lttng-irqfreq-mi +.SH DESCRIPTION +usage: lttng\-irqfreq\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] [\-\-min MIN] +[\-\-max MAX] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-log] [\-\-stats] [\-\-irq IRQ] +[\-\-softirq SOFTIRQ] +[ [ ...]] +.PP +The irq command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-freq\fR +Output the frequency distribution of handler durations +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-log\fR +Output the IRQs in chronological order +.TP +\fB\-\-stats\fR +Output IRQ statistics +.TP +\fB\-\-irq\fR IRQ +Output results only for the list of IRQ +.TP +\fB\-\-softirq\fR SOFTIRQ +Output results only for the list of SoftIRQ diff -Nru lttnganalyses-0.3.0/debian/man/lttng-irqlog.1 lttnganalyses-0.4.3/debian/man/lttng-irqlog.1 --- lttnganalyses-0.3.0/debian/man/lttng-irqlog.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-irqlog.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,14 +1,19 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-IRQLOG "1" "July 2015" "lttng-irqlog 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IRQLOG "1" "March 2016" "lttng-irqlog 0.4.2" "User Commands" .SH NAME lttng-irqlog \- LTTng analyses IRQ log .SH DESCRIPTION -usage: lttng\-irqlog [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-irqlog [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] [\-\-end END] -[\-\-timerange TIMERANGE] [\-\-max MAX] [\-\-min MIN] [\-\-freq] -[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-log] [\-\-stats] -[\-\-irq IRQ] [\-\-softirq SOFTIRQ] [\-V] +[\-\-begin BEGIN] [\-\-end END] [\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] [\-\-min MIN] +[\-\-max MAX] [\-\-freq] [\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-log] [\-\-stats] +[\-\-irq IRQ] [\-\-softirq SOFTIRQ] .PP The irq command. 
@@ -22,53 +27,81 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP -\fB\-\-max\fR MAX -Filter out, duration longer than max usec +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar .TP \fB\-\-min\fR MIN -Filter out, duration shorter than min usec +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec .TP \fB\-\-freq\fR -Show the frequency distribution of handler duration +Output the frequency distribution of handler durations .TP \fB\-\-freq\-resolution\fR FREQ_RESOLUTION Frequency distribution resolution (default 20) .TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP \fB\-\-log\fR -Display the events in the order they appeared +Output the IRQs in chronological order .TP \fB\-\-stats\fR -Display the statistics +Output IRQ statistics .TP \fB\-\-irq\fR IRQ -Show results only for the list of IRQ +Output results only for the list of IRQ .TP \fB\-\-softirq\fR SOFTIRQ -Show results only for the list of SoftIRQ -.TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +Output results only for the list of SoftIRQ diff -Nru lttnganalyses-0.3.0/debian/man/lttng-irqlog-mi.1 lttnganalyses-0.4.3/debian/man/lttng-irqlog-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-irqlog-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-irqlog-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,108 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IRQLOG-MI "1" "March 2016" "lttng-irqlog-mi 0.4.2" "User Commands" +.SH NAME +lttng-irqlog-mi \- lttng-irqlog-mi +.SH DESCRIPTION +usage: lttng\-irqlog\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] [\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] [\-\-min MIN] +[\-\-max MAX] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-log] [\-\-stats] [\-\-irq IRQ] +[\-\-softirq SOFTIRQ] +[ [ ...]] +.PP +The irq command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-freq\fR +Output the frequency distribution of handler durations +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-log\fR +Output the IRQs in chronological order +.TP +\fB\-\-stats\fR +Output IRQ statistics +.TP +\fB\-\-irq\fR IRQ +Output results only for the list of IRQ +.TP +\fB\-\-softirq\fR SOFTIRQ +Output results only for the list of SoftIRQ diff -Nru lttnganalyses-0.3.0/debian/man/lttng-irqstats.1 lttnganalyses-0.4.3/debian/man/lttng-irqstats.1 --- lttnganalyses-0.3.0/debian/man/lttng-irqstats.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-irqstats.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,14 +1,19 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-IRQSTATS "1" "July 2015" "lttng-irqstats 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IRQSTATS "1" "March 2016" "lttng-irqstats 0.4.2" "User Commands" .SH NAME lttng-irqstats \- LTTng analyses IRQ statistics .SH DESCRIPTION -usage: lttng\-irqstats [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-irqstats [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] [\-\-end END] -[\-\-timerange TIMERANGE] [\-\-max MAX] [\-\-min MIN] [\-\-freq] -[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-log] [\-\-stats] -[\-\-irq IRQ] [\-\-softirq SOFTIRQ] [\-V] +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] [\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] [\-\-min MIN] +[\-\-max MAX] [\-\-freq] [\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-log] [\-\-stats] +[\-\-irq IRQ] [\-\-softirq SOFTIRQ] .PP The irq command. 
@@ -22,53 +27,81 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP -\fB\-\-max\fR MAX -Filter out, duration longer than max usec +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar .TP \fB\-\-min\fR MIN -Filter out, duration shorter than min usec +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec .TP \fB\-\-freq\fR -Show the frequency distribution of handler duration +Output the frequency distribution of handler durations .TP \fB\-\-freq\-resolution\fR FREQ_RESOLUTION Frequency distribution resolution (default 20) .TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP \fB\-\-log\fR -Display the events in the order they appeared +Output the IRQs in chronological order .TP \fB\-\-stats\fR -Display the statistics +Output IRQ statistics .TP \fB\-\-irq\fR IRQ -Show results only for the list of IRQ +Output results only for the list of IRQ .TP \fB\-\-softirq\fR SOFTIRQ -Show results only for the list of SoftIRQ -.TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +Output results only for the list of SoftIRQ diff -Nru lttnganalyses-0.3.0/debian/man/lttng-irqstats-mi.1 lttnganalyses-0.4.3/debian/man/lttng-irqstats-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-irqstats-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-irqstats-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,109 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-IRQSTATS-MI "1" "March 2016" "lttng-irqstats-mi 0.4.2" "User Commands" +.SH NAME +lttng-irqstats-mi \- lttng-irqstats-mi +.SH DESCRIPTION +usage: lttng\-irqstats\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] [\-\-min MIN] +[\-\-max MAX] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-log] [\-\-stats] [\-\-irq IRQ] +[\-\-softirq SOFTIRQ] +[ [ ...]] +.PP +The irq command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-freq\fR +Output the frequency distribution of handler durations +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-log\fR +Output the IRQs in chronological order +.TP +\fB\-\-stats\fR +Output IRQ statistics +.TP +\fB\-\-irq\fR IRQ +Output results only for the list of IRQ +.TP +\fB\-\-softirq\fR SOFTIRQ +Output results only for the list of SoftIRQ diff -Nru lttnganalyses-0.3.0/debian/man/lttng-memtop.1 lttnganalyses-0.4.3/debian/man/lttng-memtop.1 --- lttnganalyses-0.3.0/debian/man/lttng-memtop.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-memtop.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,13 +1,17 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-MEMTOP "1" "July 2015" "lttng-memtop 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-MEMTOP "1" "March 2016" "lttng-memtop 0.4.2" "User Commands" .SH NAME lttng-memtop \- LTTng analyses memory top .SH DESCRIPTION -usage: lttng\-memtop [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-memtop [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] [\-\-end END] -[\-\-timerange TIMERANGE] [\-\-procname PROCNAME] [\-\-pid PID] -[\-V] +[\-\-begin BEGIN] [\-\-end END] [\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-limit LIMIT] [\-\-top] .PP The memtop command. @@ -21,35 +25,62 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. 
If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. +.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP \fB\-\-procname\fR PROCNAME Filter the results only for this list of process names .TP -\fB\-\-pid\fR PID -Filter the results only for this list of PIDs +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs .TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top results diff -Nru lttnganalyses-0.3.0/debian/man/lttng-memtop-mi.1 lttnganalyses-0.4.3/debian/man/lttng-memtop-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-memtop-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-memtop-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,87 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-MEMTOP-MI "1" "March 2016" "lttng-memtop-mi 0.4.2" "User Commands" +.SH NAME +lttng-memtop-mi \- lttng-memtop-mi +.SH DESCRIPTION +usage: lttng\-memtop\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] [\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] +[\-\-procname PROCNAME] [\-\-tid TID] [\-\-limit LIMIT] +[\-\-top] +[ [ ...]] +.PP +The memtop command. 
+.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top results diff -Nru lttnganalyses-0.3.0/debian/man/lttng-schedfreq.1 lttnganalyses-0.4.3/debian/man/lttng-schedfreq.1 --- lttnganalyses-0.3.0/debian/man/lttng-schedfreq.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-schedfreq.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,126 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-SCHEDFREQ "1" "March 2016" "lttng-schedfreq 0.4.2" "User Commands" +.SH NAME +lttng-schedfreq \- LTTng analyses Scheduler frequency +.SH DESCRIPTION +usage: lttng\-schedfreq [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] [\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-min MIN] [\-\-max MAX] [\-\-procname PROCNAME] +[\-\-tid TID] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-limit LIMIT] [\-\-top] [\-\-log] +[\-\-stats] [\-\-total] [\-\-per\-tid] [\-\-per\-prio] + +.PP +The sched command. 
+.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-freq\fR +Output the frequency distribution of sched switch +latencies +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top sched switch latencies +.TP +\fB\-\-log\fR +Output the sched switches in chronological order +.TP +\fB\-\-stats\fR +Output sched switch statistics +.TP +\fB\-\-total\fR +Group all results (applies to stats and freq) +.TP +\fB\-\-per\-tid\fR +Group results per\-TID (applies to stats and freq) +(default) +.TP +\fB\-\-per\-prio\fR +Group results per\-prio (applies to stats and freq) diff -Nru lttnganalyses-0.3.0/debian/man/lttng-schedfreq-mi.1 lttnganalyses-0.4.3/debian/man/lttng-schedfreq-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-schedfreq-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-schedfreq-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,127 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-SCHEDFREQ-MI "1" "March 2016" "lttng-schedfreq-mi 0.4.2" "User Commands" +.SH NAME +lttng-schedfreq-mi \- lttng-schedfreq-mi +.SH DESCRIPTION +usage: lttng\-schedfreq\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] +[\-\-min MIN] [\-\-max MAX] [\-\-procname PROCNAME] +[\-\-tid TID] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-limit LIMIT] [\-\-top] [\-\-log] +[\-\-stats] [\-\-total] [\-\-per\-tid] [\-\-per\-prio] +[ [ ...]] +.PP +The sched command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-freq\fR +Output the frequency distribution of sched switch +latencies +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top sched switch latencies +.TP +\fB\-\-log\fR +Output the sched switches in chronological order +.TP +\fB\-\-stats\fR +Output sched switch statistics +.TP +\fB\-\-total\fR +Group all results (applies to stats and freq) +.TP +\fB\-\-per\-tid\fR +Group results per\-TID (applies to stats and freq) +(default) +.TP +\fB\-\-per\-prio\fR +Group results per\-prio (applies to stats and freq) diff -Nru lttnganalyses-0.3.0/debian/man/lttng-schedlog.1 lttnganalyses-0.4.3/debian/man/lttng-schedlog.1 --- lttnganalyses-0.3.0/debian/man/lttng-schedlog.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-schedlog.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,125 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-SCHEDLOG "1" "March 2016" "lttng-schedlog 0.4.2" "User Commands" +.SH NAME +lttng-schedlog \- LTTng analyses Scheduler log +.SH DESCRIPTION +usage: lttng\-schedlog [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] [\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] [\-\-min MIN] +[\-\-max MAX] [\-\-procname PROCNAME] [\-\-tid TID] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-limit LIMIT] [\-\-top] [\-\-log] +[\-\-stats] [\-\-total] [\-\-per\-tid] [\-\-per\-prio] + +.PP +The sched command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-freq\fR +Output the frequency distribution of sched switch +latencies +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top sched switch latencies +.TP +\fB\-\-log\fR +Output the sched switches in chronological order +.TP +\fB\-\-stats\fR +Output sched switch statistics +.TP +\fB\-\-total\fR +Group all results (applies to stats and freq) +.TP +\fB\-\-per\-tid\fR +Group results per\-TID (applies to stats and freq) +(default) +.TP +\fB\-\-per\-prio\fR +Group results per\-prio (applies to stats and freq) diff -Nru lttnganalyses-0.3.0/debian/man/lttng-schedlog-mi.1 lttnganalyses-0.4.3/debian/man/lttng-schedlog-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-schedlog-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-schedlog-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,127 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-SCHEDLOG-MI "1" "March 2016" "lttng-schedlog-mi 0.4.2" "User Commands" +.SH NAME +lttng-schedlog-mi \- lttng-schedlog-mi +.SH DESCRIPTION +usage: lttng\-schedlog\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] [\-\-min MIN] +[\-\-max MAX] [\-\-procname PROCNAME] [\-\-tid TID] +[\-\-freq] [\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-limit LIMIT] +[\-\-top] [\-\-log] [\-\-stats] [\-\-total] [\-\-per\-tid] +[\-\-per\-prio] +[ [ ...]] +.PP +The sched command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-freq\fR +Output the frequency distribution of sched switch +latencies +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top sched switch latencies +.TP +\fB\-\-log\fR +Output the sched switches in chronological order +.TP +\fB\-\-stats\fR +Output sched switch statistics +.TP +\fB\-\-total\fR +Group all results (applies to stats and freq) +.TP +\fB\-\-per\-tid\fR +Group results per\-TID (applies to stats and freq) +(default) +.TP +\fB\-\-per\-prio\fR +Group results per\-prio (applies to stats and freq) diff -Nru lttnganalyses-0.3.0/debian/man/lttng-schedstats.1 lttnganalyses-0.4.3/debian/man/lttng-schedstats.1 --- lttnganalyses-0.3.0/debian/man/lttng-schedstats.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-schedstats.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,127 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-SCHEDSTATS "1" "March 2016" "lttng-schedstats 0.4.2" "User Commands" +.SH NAME +lttng-schedstats \- LTTng analyses Scheduler statistics +.SH DESCRIPTION +usage: lttng\-schedstats [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-min MIN] [\-\-max MAX] [\-\-procname PROCNAME] +[\-\-tid TID] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-limit LIMIT] [\-\-top] [\-\-log] +[\-\-stats] [\-\-total] [\-\-per\-tid] [\-\-per\-prio] + +.PP +The sched command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-freq\fR +Output the frequency distribution of sched switch +latencies +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top sched switch latencies +.TP +\fB\-\-log\fR +Output the sched switches in chronological order +.TP +\fB\-\-stats\fR +Output sched switch statistics +.TP +\fB\-\-total\fR +Group all results (applies to stats and freq) +.TP +\fB\-\-per\-tid\fR +Group results per\-TID (applies to stats and freq) +(default) +.TP +\fB\-\-per\-prio\fR +Group results per\-prio (applies to stats and freq) diff -Nru lttnganalyses-0.3.0/debian/man/lttng-schedstats-mi.1 lttnganalyses-0.4.3/debian/man/lttng-schedstats-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-schedstats-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-schedstats-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,128 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-SCHEDSTATS-MI "1" "March 2016" "lttng-schedstats-mi 0.4.2" "User Commands" +.SH NAME +lttng-schedstats-mi \- lttng-schedstats-mi +.SH DESCRIPTION +usage: lttng\-schedstats\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] +[\-\-min MIN] [\-\-max MAX] [\-\-procname PROCNAME] +[\-\-tid TID] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-limit LIMIT] +[\-\-top] [\-\-log] [\-\-stats] [\-\-total] [\-\-per\-tid] +[\-\-per\-prio] +[ [ ...]] +.PP +The sched command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-freq\fR +Output the frequency distribution of sched switch +latencies +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top sched switch latencies +.TP +\fB\-\-log\fR +Output the sched switches in chronological order +.TP +\fB\-\-stats\fR +Output sched switch statistics +.TP +\fB\-\-total\fR +Group all results (applies to stats and freq) +.TP +\fB\-\-per\-tid\fR +Group results per\-TID (applies to stats and freq) +(default) +.TP +\fB\-\-per\-prio\fR +Group results per\-prio (applies to stats and freq) diff -Nru lttnganalyses-0.3.0/debian/man/lttng-schedtop.1 lttnganalyses-0.4.3/debian/man/lttng-schedtop.1 --- lttnganalyses-0.3.0/debian/man/lttng-schedtop.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-schedtop.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,125 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-SCHEDTOP "1" "March 2016" "lttng-schedtop 0.4.2" "User Commands" +.SH NAME +lttng-schedtop \- LTTng analyses Scheduler top +.SH DESCRIPTION +usage: lttng\-schedtop [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] [\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] [\-\-min MIN] +[\-\-max MAX] [\-\-procname PROCNAME] [\-\-tid TID] [\-\-freq] +[\-\-freq\-resolution FREQ_RESOLUTION] [\-\-freq\-uniform] +[\-\-freq\-series] [\-\-limit LIMIT] [\-\-top] [\-\-log] +[\-\-stats] [\-\-total] [\-\-per\-tid] [\-\-per\-prio] + +.PP +The sched command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-freq\fR +Output the frequency distribution of sched switch +latencies +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top sched switch latencies +.TP +\fB\-\-log\fR +Output the sched switches in chronological order +.TP +\fB\-\-stats\fR +Output sched switch statistics +.TP +\fB\-\-total\fR +Group all results (applies to stats and freq) +.TP +\fB\-\-per\-tid\fR +Group results per\-TID (applies to stats and freq) +(default) +.TP +\fB\-\-per\-prio\fR +Group results per\-prio (applies to stats and freq) diff -Nru lttnganalyses-0.3.0/debian/man/lttng-schedtop-mi.1 lttnganalyses-0.4.3/debian/man/lttng-schedtop-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-schedtop-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-schedtop-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,127 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-SCHEDTOP-MI "1" "March 2016" "lttng-schedtop-mi 0.4.2" "User Commands" +.SH NAME +lttng-schedtop-mi \- lttng-schedtop-mi +.SH DESCRIPTION +usage: lttng\-schedtop\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] [\-\-min MIN] +[\-\-max MAX] [\-\-procname PROCNAME] [\-\-tid TID] +[\-\-freq] [\-\-freq\-resolution FREQ_RESOLUTION] +[\-\-freq\-uniform] [\-\-freq\-series] [\-\-limit LIMIT] +[\-\-top] [\-\-log] [\-\-stats] [\-\-total] [\-\-per\-tid] +[\-\-per\-prio] +[ [ ...]] +.PP +The sched command. +.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. 
+.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-min\fR MIN +Filter out durations shorter than min usec +.TP +\fB\-\-max\fR MAX +Filter out durations longer than max usec +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs +.TP +\fB\-\-freq\fR +Output the frequency distribution of sched switch +latencies +.TP +\fB\-\-freq\-resolution\fR FREQ_RESOLUTION +Frequency distribution resolution (default 20) +.TP +\fB\-\-freq\-uniform\fR +Use a uniform resolution across distributions +.TP +\fB\-\-freq\-series\fR +Consolidate frequency distribution histogram as a +single one +.TP +\fB\-\-limit\fR LIMIT +Limit to top X (default = 10) +.TP +\fB\-\-top\fR +Output the top sched switch latencies +.TP +\fB\-\-log\fR +Output the sched switches in chronological order +.TP +\fB\-\-stats\fR +Output sched switch statistics +.TP +\fB\-\-total\fR +Group all results (applies to stats and freq) +.TP +\fB\-\-per\-tid\fR +Group results per\-TID (applies to stats and freq) +(default) +.TP +\fB\-\-per\-prio\fR +Group results per\-prio (applies to stats and freq) diff -Nru lttnganalyses-0.3.0/debian/man/lttng-syscallstats.1 lttnganalyses-0.4.3/debian/man/lttng-syscallstats.1 --- lttnganalyses-0.3.0/debian/man/lttng-syscallstats.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-syscallstats.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,13 +1,18 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. -.TH LTTNG-SYSCALLSTATS "1" "July 2015" "lttng-syscallstats 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. 
+.TH LTTNG-SYSCALLSTATS "1" "March 2016" "lttng-syscallstats 0.4.2" "User Commands" .SH NAME lttng-syscallstats \- LTTng analyses syscall statistics .SH DESCRIPTION -usage: lttng\-syscallstats [\-h] [\-r REFRESH] [\-\-limit LIMIT] [\-\-no\-progress] +usage: lttng\-syscallstats [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] .TP -[\-\-skip\-validation] [\-\-gmt] [\-\-begin BEGIN] -[\-\-end END] [\-\-timerange TIMERANGE] -[\-\-procname PROCNAME] [\-\-pid PID] [\-V] +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-no\-progress] +[\-\-procname PROCNAME] [\-\-tid TID] .PP The syscallstats command. @@ -21,35 +26,56 @@ show this help message and exit .TP \fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH -Refresh period in seconds -.TP -\fB\-\-limit\fR LIMIT -Limit to top X (default = 10) -.TP -\fB\-\-no\-progress\fR -Don't display the progress bar -.TP -\fB\-\-skip\-validation\fR -Skip the trace validation +Refresh period, with optional units suffix (default +units: s) .TP \fB\-\-gmt\fR Manipulate timestamps based on GMT instead of local time .TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP \fB\-\-begin\fR BEGIN start time: hh:mm:ss[.nnnnnnnnn] .TP \fB\-\-end\fR END end time: hh:mm:ss[.nnnnnnnnn] .TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. 
If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. +.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP \fB\-\-timerange\fR TIMERANGE time range: [begin,end] .TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-no\-progress\fR +Don't display the progress bar +.TP \fB\-\-procname\fR PROCNAME Filter the results only for this list of process names .TP -\fB\-\-pid\fR PID -Filter the results only for this list of PIDs -.TP -\fB\-V\fR, \fB\-\-version\fR -show program's version number and exit +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs diff -Nru lttnganalyses-0.3.0/debian/man/lttng-syscallstats-mi.1 lttnganalyses-0.4.3/debian/man/lttng-syscallstats-mi.1 --- lttnganalyses-0.3.0/debian/man/lttng-syscallstats-mi.1 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-syscallstats-mi.1 2016-03-08 15:29:30.000000000 +0000 @@ -0,0 +1,81 @@ +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-SYSCALLSTATS-MI "1" "March 2016" "lttng-syscallstats-mi 0.4.2" "User Commands" +.SH NAME +lttng-syscallstats-mi \- lttng-syscallstats-mi +.SH DESCRIPTION +usage: lttng\-syscallstats\-mi [\-h] [\-r REFRESH] [\-\-gmt] [\-\-skip\-validation] +.TP +[\-\-begin BEGIN] [\-\-end END] +[\-\-period\-begin PERIOD_BEGIN] +[\-\-period\-end PERIOD_END] +[\-\-period\-begin\-key PERIOD_BEGIN_KEY] +[\-\-period\-end\-key PERIOD_END_KEY] +[\-\-period\-key\-value PERIOD_KEY_VALUE] [\-\-cpu CPU] +[\-\-timerange TIMERANGE] [\-V] [\-\-metadata] +[\-\-procname PROCNAME] [\-\-tid TID] +[ [ ...]] +.PP +The syscallstats command. 
+.SS "positional arguments:" +.TP + +trace path +.SS "optional arguments:" +.TP +\fB\-h\fR, \fB\-\-help\fR +show this help message and exit +.TP +\fB\-r\fR REFRESH, \fB\-\-refresh\fR REFRESH +Refresh period, with optional units suffix (default +units: s) +.TP +\fB\-\-gmt\fR +Manipulate timestamps based on GMT instead of local +time +.TP +\fB\-\-skip\-validation\fR +Skip the trace validation +.TP +\fB\-\-begin\fR BEGIN +start time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-end\fR END +end time: hh:mm:ss[.nnnnnnnnn] +.TP +\fB\-\-period\-begin\fR PERIOD_BEGIN +Analysis period start marker event name +.TP +\fB\-\-period\-end\fR PERIOD_END +Analysis period end marker event name (requires +\fB\-\-period\-begin\fR) +.TP +\fB\-\-period\-begin\-key\fR PERIOD_BEGIN_KEY +Optional, list of event field names used to match +period markers (default: cpu_id) +.TP +\fB\-\-period\-end\-key\fR PERIOD_END_KEY +Optional, list of event field names used to match +period marker. If none specified, use the same +\fB\-\-period\-begin\-key\fR +.TP +\fB\-\-period\-key\-value\fR PERIOD_KEY_VALUE +Optional, define a fixed key value to which a period +must correspond to be considered. +.TP +\fB\-\-cpu\fR CPU +Filter the results only for this list of CPU IDs +.TP +\fB\-\-timerange\fR TIMERANGE +time range: [begin,end] +.TP +\fB\-V\fR, \fB\-\-version\fR +show program's version number and exit +.TP +\fB\-\-metadata\fR +Show analysis's metadata +.TP +\fB\-\-procname\fR PROCNAME +Filter the results only for this list of process names +.TP +\fB\-\-tid\fR TID +Filter the results only for this list of TIDs diff -Nru lttnganalyses-0.3.0/debian/man/lttng-track-process.1 lttnganalyses-0.4.3/debian/man/lttng-track-process.1 --- lttnganalyses-0.3.0/debian/man/lttng-track-process.1 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/man/lttng-track-process.1 2016-03-08 15:29:30.000000000 +0000 @@ -1,5 +1,5 @@ -.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.1. 
-.TH LTTNG-TRACK-PROCESS "1" "July 2015" "lttng-track-process 0.3.0" "User Commands" +.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.3. +.TH LTTNG-TRACK-PROCESS "1" "March 2016" "lttng-track-process 0.4.2" "User Commands" .SH NAME lttng-track-process \- Track a process throughout a LTTng trace .SH DESCRIPTION diff -Nru lttnganalyses-0.3.0/debian/python3-lttnganalyses.manpages lttnganalyses-0.4.3/debian/python3-lttnganalyses.manpages --- lttnganalyses-0.3.0/debian/python3-lttnganalyses.manpages 2016-01-13 20:07:08.000000000 +0000 +++ lttnganalyses-0.4.3/debian/python3-lttnganalyses.manpages 2016-03-08 15:29:30.000000000 +0000 @@ -1,13 +1,32 @@ -debian/man/lttng-track-process.1 debian/man/lttng-analyses-record.1 debian/man/lttng-cputop.1 +debian/man/lttng-cputop-mi.1 debian/man/lttng-iolatencyfreq.1 +debian/man/lttng-iolatencyfreq-mi.1 debian/man/lttng-iolatencystats.1 +debian/man/lttng-iolatencystats-mi.1 debian/man/lttng-iolatencytop.1 +debian/man/lttng-iolatencytop-mi.1 debian/man/lttng-iolog.1 +debian/man/lttng-iolog-mi.1 debian/man/lttng-iousagetop.1 +debian/man/lttng-iousagetop-mi.1 debian/man/lttng-irqfreq.1 +debian/man/lttng-irqfreq-mi.1 debian/man/lttng-irqlog.1 +debian/man/lttng-irqlog-mi.1 debian/man/lttng-irqstats.1 +debian/man/lttng-irqstats-mi.1 debian/man/lttng-memtop.1 +debian/man/lttng-memtop-mi.1 +debian/man/lttng-schedfreq.1 +debian/man/lttng-schedfreq-mi.1 +debian/man/lttng-schedlog.1 +debian/man/lttng-schedlog-mi.1 +debian/man/lttng-schedstats.1 +debian/man/lttng-schedstats-mi.1 +debian/man/lttng-schedtop.1 +debian/man/lttng-schedtop-mi.1 debian/man/lttng-syscallstats.1 +debian/man/lttng-syscallstats-mi.1 +debian/man/lttng-track-process.1 diff -Nru lttnganalyses-0.3.0/LICENSE lttnganalyses-0.4.3/LICENSE --- lttnganalyses-0.3.0/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/LICENSE 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,5 @@ +LTTng-Analyses - Licensing + +These analyses are released under the MIT 
license. This license is used to +allow the use of these analyses in both free and proprietary software. See +mit-license.txt for details. diff -Nru lttnganalyses-0.3.0/lttnganalyses/ascii_graph/__init__.py lttnganalyses-0.4.3/lttnganalyses/ascii_graph/__init__.py --- lttnganalyses-0.3.0/lttnganalyses/ascii_graph/__init__.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/ascii_graph/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,229 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2012 Pierre-Francois Carpentier -# -# https://github.com/kakwa/py-ascii-graph/ -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -from __future__ import unicode_literals -import sys -import os - - -class Pyasciigraph: - def __init__(self, line_length=79, min_graph_length=50, - separator_length=2): - """Constructor of Pyasciigraph - - :param int line_length: the max number of char on a line - if any line cannot be shorter, - it will go over this limit - :param int min_graph_length: the min number of char used by the graph - :param int separator_length: the length of field separator - """ - self.line_length = line_length - self.separator_length = separator_length - self.min_graph_length = min_graph_length - - def _u(self, x): - if sys.version < '3': - import codecs - return codecs.unicode_escape_decode(x)[0] - else: - return x - - def _get_maximum(self, data): - all_max = {} - all_max['value_max_length'] = 0 - all_max['info_max_length'] = 0 - all_max['max_value'] = 0 - - for (info, value) in data: - if value > all_max['max_value']: - all_max['max_value'] = value - - if len(info) > all_max['info_max_length']: - all_max['info_max_length'] = len(info) - - if len(str(value)) > all_max['value_max_length']: - all_max['value_max_length'] = len(str(value)) - return all_max - - def _gen_graph_string(self, value, max_value, graph_length, start_value): - if max_value == 0: - number_of_square = int(value * graph_length) - else: - number_of_square = int(value * graph_length / max_value) - number_of_space = int(start_value - number_of_square) - return '█' * number_of_square + self._u(' ') * number_of_space - - def _console_size(self): - TERMSIZE = 80 - return int(os.environ.get('COLUMNS', TERMSIZE)) - 1 - - def _gen_info_string(self, info, start_info, line_length, info_before): - number_of_space = (line_length - start_info - len(info)) - if info_before: - return self._u(' ') * number_of_space + info - else: - return info + self._u(' ') * number_of_space - - def _gen_value_string(self, value, start_value, start_info, unit, count): - if not count: - v = str("%0.02f" % value) - else: - # we don't want to add 
.00 to count values (only integers) - v = str(value) - number_space = start_info -\ - start_value -\ - len(v) -\ - self.separator_length - - return ' ' * number_space +\ - v + str(unit) +\ - ' ' * self.separator_length - - def _sanitize_string(self, string): - # get the type of a unicode string - unicode_type = type(self._u('t')) - input_type = type(string) - if input_type is str: - if sys.version_info.major < 3: # pragma: no cover - info = string - else: - info = string - elif input_type is unicode_type: - info = string - elif input_type is int or input_type is float: - if sys.version_info.major < 3: # pragma: no cover - info = string - else: - info = str(string) - return info - - def _sanitize_data(self, data): - ret = [] - for item in data: - ret.append((self._sanitize_string(item[0]), item[1])) - return ret - - def graph(self, label, data, sort=0, with_value=True, unit="", - info_before=False, count=False): - """function generating the graph - - :param string label: the label of the graph - :param iterable data: the data (list of tuple (info, value)) - info must be "castable" to a unicode string - value must be an int or a float - :param int sort: flag sorted - 0: not sorted (same order as given) (default) - 1: increasing order - 2: decreasing order - :param boolean with_value: flag printing value - True: print the numeric value (default) - False: don't print the numeric value - :rtype: a list of strings (each lines) - - """ - result = [] - san_data = self._sanitize_data(data) - san_label = self._sanitize_string(label) - - if sort == 1: - san_data = sorted(san_data, key=lambda value: value[1], - reverse=False) - elif sort == 2: - san_data = sorted(san_data, key=lambda value: value[1], - reverse=True) - - all_max = self._get_maximum(san_data) - - real_line_length = max(self.line_length, len(label)) - - min_line_length = self.min_graph_length +\ - 2 * self.separator_length +\ - all_max['value_max_length'] +\ - all_max['info_max_length'] - - if min_line_length < 
real_line_length: - # calcul of where to start info - start_info = self.line_length -\ - all_max['info_max_length'] - # calcul of where to start value - start_value = start_info -\ - self.separator_length -\ - all_max['value_max_length'] - # calcul of where to end graph - graph_length = start_value -\ - self.separator_length - else: - # calcul of where to start value - start_value = self.min_graph_length +\ - self.separator_length - # calcul of where to start info - start_info = start_value +\ - all_max['value_max_length'] +\ - self.separator_length - # calcul of where to end graph - graph_length = self.min_graph_length - # calcul of the real line length - real_line_length = min_line_length - - real_line_length = min(real_line_length, self._console_size()) - result.append(san_label) - result.append(self._u('#') * real_line_length) - - for item in san_data: - info = item[0] - value = item[1] - - graph_string = self._gen_graph_string( - value, - all_max['max_value'], - graph_length, - start_value) - - if with_value: - value_string = self._gen_value_string( - value, - start_value, - start_info, unit, count) - else: - value_string = "" - - info_string = self._gen_info_string( - info, - start_info, - real_line_length, info_before) - if info_before: - new_line = info_string + " " + graph_string + value_string - else: - new_line = graph_string + value_string + info_string - result.append(new_line) - - return result - -if __name__ == '__main__': - test = [('long_label', 423), ('sl', 1234), ('line3', 531), - ('line4', 200), ('line5', 834)] - graph = Pyasciigraph() - for line in graph.graph('test print', test): - print(line) diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/command.py lttnganalyses-0.4.3/lttnganalyses/cli/command.py --- lttnganalyses-0.3.0/lttnganalyses/cli/command.py 2015-07-13 21:53:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/command.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,8 +1,8 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # 
Copyright (C) 2015 - Julien Desfossez +# 2015 - Philippe Proulx +# 2015 - Antoine Busque # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -22,34 +22,65 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -from ..linuxautomaton import automaton -from .. import __version__ -from . import progressbar -from ..linuxautomaton import common -from babeltrace import TraceCollection import argparse +import json +import os +import re import sys import subprocess +from babeltrace import TraceCollection +from . import mi +from .. import _version +from . import progressbar +from .. import __version__ +from ..common import version_utils +from ..core import analysis +from ..linuxautomaton import common +from ..linuxautomaton import automaton class Command: - def __init__(self, add_arguments_cb, - enable_proc_filter_args=False, - enable_max_min_args=False, - enable_max_min_size_arg=False, - enable_freq_arg=False, - enable_log_arg=False, - enable_stats_arg=False): - self._add_arguments_cb = add_arguments_cb - self._enable_proc_filter_args = enable_proc_filter_args - self._enable_max_min_arg = enable_max_min_args - self._enable_max_min_size_arg = enable_max_min_size_arg - self._enable_freq_arg = enable_freq_arg - self._enable_log_arg = enable_log_arg - self._enable_stats_arg = enable_stats_arg + _MI_BASE_TAGS = ['linux-kernel', 'lttng-analyses'] + _MI_AUTHORS = [ + 'Julien Desfossez', + 'Antoine Busque', + 'Philippe Proulx', + ] + _MI_URL = 'https://github.com/lttng/lttng-analyses' + + def __init__(self, mi_mode=False): + self._analysis = None + self._analysis_conf = None + self._args = None + self._handles = None + self._traces = None + self._ticks = 0 + self._mi_mode = mi_mode self._create_automaton() + self._mi_setup() + + @property + def mi_mode(self): + return self._mi_mode + + def run(self): + try: + self._parse_args() + 
self._open_trace() + self._create_analysis() + self._run_analysis() + self._close_trace() + except KeyboardInterrupt: + sys.exit(0) def _error(self, msg, exit_code=1): + try: + import termcolor + + msg = termcolor.colored(msg, 'red', attrs=['bold']) + except ImportError: + pass + print(msg, file=sys.stderr) sys.exit(exit_code) @@ -59,262 +90,459 @@ def _cmdline_error(self, msg, exit_code=1): self._error('Command line error: {}'.format(msg), exit_code) + def _print(self, msg): + if not self._mi_mode: + print(msg) + + def _mi_create_result_table(self, table_class_name, begin, end, + subtitle=None): + return mi.ResultTable(self._mi_table_classes[table_class_name], + begin, end, subtitle) + + def _mi_setup(self): + self._mi_table_classes = {} + + for tc_tuple in self._MI_TABLE_CLASSES: + table_class = mi.TableClass(tc_tuple[0], tc_tuple[1], tc_tuple[2]) + self._mi_table_classes[table_class.name] = table_class + + self._mi_clear_result_tables() + + def _mi_print_metadata(self): + tags = self._MI_BASE_TAGS + self._MI_TAGS + infos = mi.get_metadata(version=self._MI_VERSION, title=self._MI_TITLE, + description=self._MI_DESCRIPTION, + authors=self._MI_AUTHORS, url=self._MI_URL, + tags=tags, + table_classes=self._mi_table_classes.values()) + print(json.dumps(infos)) + + def _mi_append_result_table(self, result_table): + if not result_table or not result_table.rows: + return + + tc_name = result_table.table_class.name + self._mi_get_result_tables(tc_name).append(result_table) + + def _mi_append_result_tables(self, result_tables): + if not result_tables: + return + + for result_table in result_tables: + self._mi_append_result_table(result_table) + + def _mi_clear_result_tables(self): + self._result_tables = {} + + def _mi_get_result_tables(self, table_class_name): + if table_class_name not in self._result_tables: + self._result_tables[table_class_name] = [] + + return self._result_tables[table_class_name] + + def _mi_print(self): + results = [] + + for result_tables in 
self._result_tables.values(): + for result_table in result_tables: + results.append(result_table.to_native_object()) + + obj = { + 'results': results, + } + + print(json.dumps(obj)) + + def _create_summary_result_tables(self): + pass + def _open_trace(self): traces = TraceCollection() - handles = traces.add_traces_recursive(self._arg_path, 'ctf') + handles = traces.add_traces_recursive(self._args.path, 'ctf') if handles == {}: - self._gen_error('Failed to open ' + self._arg_path, -1) + self._gen_error('Failed to open ' + self._args.path, -1) self._handles = handles self._traces = traces self._process_date_args() - if not self._arg_skip_validation: + self._read_tracer_version() + if not self._args.skip_validation: self._check_lost_events() def _close_trace(self): for handle in self._handles.values(): self._traces.remove_trace(handle) + def _read_tracer_version(self): + kernel_path = None + # remove the trailing / + while self._args.path.endswith('/'): + self._args.path = self._args.path[:-1] + for root, _, _ in os.walk(self._args.path): + if root.endswith('kernel'): + kernel_path = root + break + + if kernel_path is None: + self._gen_error('Could not find kernel trace directory') + + try: + ret, metadata = subprocess.getstatusoutput( + 'babeltrace -o ctf-metadata "%s"' % kernel_path) + except subprocess.CalledProcessError: + self._gen_error('Cannot run babeltrace on the trace, cannot read' + ' tracer version') + + # fallback to reading the text metadata if babeltrace failed to + # output the CTF metadata + if ret != 0: + try: + metadata = subprocess.getoutput( + 'cat "%s"' % os.path.join(kernel_path, 'metadata')) + except subprocess.CalledProcessError: + self._gen_error('Cannot read the metadata of the trace, cannot' + 'extract tracer version') + + major_match = re.search(r'tracer_major = "*(\d+)"*', metadata) + minor_match = re.search(r'tracer_minor = "*(\d+)"*', metadata) + patch_match = re.search(r'tracer_patchlevel = "*(\d+)"*', metadata) + + if not major_match 
or not minor_match or not patch_match: + self._gen_error('Malformed metadata, cannot read tracer version') + + self.state.tracer_version = version_utils.Version( + int(major_match.group(1)), + int(minor_match.group(1)), + int(patch_match.group(1)), + ) + def _check_lost_events(self): - print('Checking the trace for lost events...') + self._print('Checking the trace for lost events...') try: - subprocess.check_output('babeltrace %s' % self._arg_path, + subprocess.check_output('babeltrace "%s"' % self._args.path, shell=True) except subprocess.CalledProcessError: - print('Error running babeltrace on the trace, cannot verify if ' - 'events were lost during the trace recording') + self._gen_error('Cannot run babeltrace on the trace, cannot verify' + ' if events were lost during the trace recording') + + def _pre_analysis(self): + pass - def _run_analysis(self, reset_cb, refresh_cb, break_cb=None): - self.trace_start_ts = 0 - self.trace_end_ts = 0 - self.current_sec = 0 - self.start_ns = 0 - self.end_ns = 0 - started = False + def _post_analysis(self): + if not self._mi_mode: + return + + if self._ticks > 1: + self._create_summary_result_tables() + + self._mi_print() + + def _run_analysis(self): + self._pre_analysis() progressbar.progressbar_setup(self) - if not self._arg_begin: - started = True + for event in self._traces.events: progressbar.progressbar_update(self) - if self._arg_begin and not started and \ - event.timestamp >= self._arg_begin: - started = True - self.trace_start_ts = event.timestamp - self.start_ns = event.timestamp - reset_cb(event.timestamp) - if self._arg_end and event.timestamp > self._arg_end: - if break_cb is not None: - # check if we really can break here - if break_cb(): - break - else: - break - if self.start_ns == 0: - self.start_ns = event.timestamp - if self.trace_start_ts == 0: - self.trace_start_ts = event.timestamp - self.end_ns = event.timestamp - self._check_refresh(event, refresh_cb) - self.trace_end_ts = event.timestamp - # feed 
analysis self._analysis.process_event(event) - # feed automaton + if self._analysis.ended: + break self._automaton.process_event(event) - progressbar.progressbar_finish(self) - def _check_refresh(self, event, refresh_cb): - """Check if we need to output something""" - if self._arg_refresh is None: - return - event_sec = event.timestamp / common.NSEC_PER_SEC - if self.current_sec == 0: - self.current_sec = event_sec - elif self.current_sec != event_sec and \ - (self.current_sec + self._arg_refresh) <= event_sec: - refresh_cb(self.start_ns, event.timestamp) - self.current_sec = event_sec - self.start_ns = event.timestamp + progressbar.progressbar_finish(self) + self._analysis.end() + self._post_analysis() def _print_date(self, begin_ns, end_ns): date = 'Timerange: [%s, %s]' % ( - common.ns_to_hour_nsec(begin_ns, gmt=self._arg_gmt, + common.ns_to_hour_nsec(begin_ns, gmt=self._args.gmt, multi_day=True), - common.ns_to_hour_nsec(end_ns, gmt=self._arg_gmt, + common.ns_to_hour_nsec(end_ns, gmt=self._args.gmt, multi_day=True)) - print(date) - - def _validate_transform_common_args(self, args): - self._arg_path = args.path + self._print(date) - if args.limit: - self._arg_limit = args.limit + def _get_uniform_freq_values(self, durations): + if self._args.uniform_step is not None: + return (self._args.uniform_min, self._args.uniform_max, + self._args.uniform_step) - self._arg_begin = None - if args.begin: - self._arg_begin = args.begin + if self._args.min is not None: + self._args.uniform_min = self._args.min + else: + self._args.uniform_min = min(durations) + if self._args.max is not None: + self._args.uniform_max = self._args.max + else: + self._args.uniform_max = max(durations) - self._arg_end = None - if args.end: - self._arg_end = args.end + # ns to µs + self._args.uniform_min /= 1000 + self._args.uniform_max /= 1000 + self._args.uniform_step = ( + (self._args.uniform_max - self._args.uniform_min) / + self._args.freq_resolution + ) - self._arg_timerange = None - if 
args.timerange: - self._arg_timerange = args.timerange + return self._args.uniform_min, self._args.uniform_max, \ + self._args.uniform_step - self._arg_gmt = None - if args.gmt: - self._arg_gmt = args.gmt + def _validate_transform_common_args(self, args): + refresh_period_ns = None + if args.refresh is not None: + try: + refresh_period_ns = common.duration_str_to_ns(args.refresh) + except ValueError as e: + self._cmdline_error(str(e)) + + self._analysis_conf = analysis.AnalysisConfig() + self._analysis_conf.refresh_period = refresh_period_ns + self._analysis_conf.period_begin_ev_name = args.period_begin + self._analysis_conf.period_end_ev_name = args.period_end + self._analysis_conf.period_begin_key_fields = \ + args.period_begin_key.split(',') + + if args.period_end_key: + self._analysis_conf.period_end_key_fields = \ + args.period_end_key.split(',') + else: + self._analysis_conf.period_end_key_fields = \ + self._analysis_conf.period_begin_key_fields - self._arg_refresh = args.refresh - self._arg_no_progress = args.no_progress - self._arg_skip_validation = args.skip_validation + if args.period_key_value: + self._analysis_conf.period_key_value = \ + tuple(args.period_key_value.split(',')) + + if args.cpu: + self._analysis_conf.cpu_list = args.cpu.split(',') + self._analysis_conf.cpu_list = [int(cpu) for cpu in + self._analysis_conf.cpu_list] + + # convert min/max args from µs to ns, if needed + if hasattr(args, 'min') and args.min is not None: + args.min *= 1000 + self._analysis_conf.min_duration = args.min + if hasattr(args, 'max') and args.max is not None: + args.max *= 1000 + self._analysis_conf.max_duration = args.max - if self._enable_proc_filter_args: - self._arg_proc_list = None + if hasattr(args, 'procname'): if args.procname: - self._arg_proc_list = args.procname.split(',') + self._analysis_conf.proc_list = args.procname.split(',') - self._arg_pid_list = None - if args.pid: - self._arg_pid_list = args.pid.split(',') - self._arg_pid_list = [int(pid) for pid 
in self._arg_pid_list] - - if self._enable_max_min_arg: - self._arg_max = args.max - self._arg_min = args.min - - if self._enable_max_min_size_arg: - self._arg_maxsize = args.maxsize - self._arg_minsize = args.minsize - - if self._enable_freq_arg: - self._arg_freq = args.freq - self._arg_freq_resolution = args.freq_resolution + if hasattr(args, 'tid'): + if args.tid: + self._analysis_conf.tid_list = args.tid.split(',') + self._analysis_conf.tid_list = [int(tid) for tid in + self._analysis_conf.tid_list] + + if hasattr(args, 'freq'): + args.uniform_min = None + args.uniform_max = None + args.uniform_step = None + + if args.freq_series: + # implies uniform buckets + args.freq_uniform = True + + if self._mi_mode: + # force no progress in MI mode + args.no_progress = True + + # print MI metadata if required + if args.metadata: + self._mi_print_metadata() + sys.exit(0) + + # validate path argument (required at this point) + if not args.path: + self._cmdline_error('Please specify a trace path') - if self._enable_log_arg: - self._arg_log = args.log + if type(args.path) is list: + args.path = args.path[0] - if self._enable_stats_arg: - self._arg_stats = args.stats + def _validate_transform_args(self, args): + pass def _parse_args(self): ap = argparse.ArgumentParser(description=self._DESC) # common arguments - ap.add_argument('path', metavar='', help='trace path') - ap.add_argument('-r', '--refresh', type=int, - help='Refresh period in seconds') - ap.add_argument('--limit', type=int, default=10, - help='Limit to top X (default = 10)') - ap.add_argument('--no-progress', action='store_true', - help='Don\'t display the progress bar') - ap.add_argument('--skip-validation', action='store_true', - help='Skip the trace validation') + ap.add_argument('-r', '--refresh', type=str, + help='Refresh period, with optional units suffix ' + '(default units: s)') ap.add_argument('--gmt', action='store_true', help='Manipulate timestamps based on GMT instead ' 'of local time') + 
ap.add_argument('--skip-validation', action='store_true', + help='Skip the trace validation') ap.add_argument('--begin', type=str, help='start time: ' 'hh:mm:ss[.nnnnnnnnn]') ap.add_argument('--end', type=str, help='end time: ' 'hh:mm:ss[.nnnnnnnnn]') + ap.add_argument('--period-begin', type=str, + help='Analysis period start marker event name') + ap.add_argument('--period-end', type=str, + help='Analysis period end marker event name ' + '(requires --period-begin)') + ap.add_argument('--period-begin-key', type=str, default='cpu_id', + help='Optional, list of event field names used to ' + 'match period markers (default: cpu_id)') + ap.add_argument('--period-end-key', type=str, + help='Optional, list of event field names used to ' + 'match period marker. If none specified, use the same ' + ' --period-begin-key') + ap.add_argument('--period-key-value', type=str, + help='Optional, define a fixed key value to which a' + ' period must correspond to be considered.') + ap.add_argument('--cpu', type=str, + help='Filter the results only for this list of ' + 'CPU IDs') ap.add_argument('--timerange', type=str, help='time range: ' '[begin,end]') - - if self._enable_proc_filter_args: - ap.add_argument('--procname', type=str, - help='Filter the results only for this list of ' - 'process names') - ap.add_argument('--pid', type=str, - help='Filter the results only for this list ' - 'of PIDs') - - if self._enable_max_min_arg: - ap.add_argument('--max', type=float, - help='Filter out, duration longer than max usec') - ap.add_argument('--min', type=float, - help='Filter out, duration shorter than min usec') - - if self._enable_max_min_size_arg: - ap.add_argument('--maxsize', type=float, - help='Filter out, I/O operations working with ' - 'more that maxsize bytes') - ap.add_argument('--minsize', type=float, - help='Filter out, I/O operations working with ' - 'less that minsize bytes') - - if self._enable_freq_arg: - ap.add_argument('--freq', action='store_true', - help='Show the 
frequency distribution of ' - 'handler duration') - ap.add_argument('--freq-resolution', type=int, default=20, - help='Frequency distribution resolution ' - '(default 20)') - - if self._enable_log_arg: - ap.add_argument('--log', action='store_true', - help='Display the events in the order they ' - 'appeared') - - if self._enable_stats_arg: - ap.add_argument('--stats', action='store_true', - help='Display the statistics') - - # specific arguments - self._add_arguments_cb(ap) - - # version of the specific command ap.add_argument('-V', '--version', action='version', version='LTTng Analyses v' + __version__) - # parse arguments - args = ap.parse_args() + # MI mode-dependent arguments + if self._mi_mode: + ap.add_argument('--metadata', action='store_true', + help='Show analysis\'s metadata') + ap.add_argument('path', metavar='', + help='trace path', nargs='*') + else: + ap.add_argument('--no-progress', action='store_true', + help='Don\'t display the progress bar') + ap.add_argument('path', metavar='', + help='trace path') - self._validate_transform_common_args(args) + # Used to add command-specific args + self._add_arguments(ap) - # save all arguments + args = ap.parse_args() + self._validate_transform_common_args(args) + self._validate_transform_args(args) self._args = args + @staticmethod + def _add_proc_filter_args(ap): + ap.add_argument('--procname', type=str, + help='Filter the results only for this list of ' + 'process names') + ap.add_argument('--tid', type=str, + help='Filter the results only for this list of TIDs') + + @staticmethod + def _add_min_max_args(ap): + ap.add_argument('--min', type=float, + help='Filter out durations shorter than min usec') + ap.add_argument('--max', type=float, + help='Filter out durations longer than max usec') + + @staticmethod + def _add_freq_args(ap, help=None): + if not help: + help = 'Output the frequency distribution' + + ap.add_argument('--freq', action='store_true', help=help) + ap.add_argument('--freq-resolution', 
type=int, default=20, + help='Frequency distribution resolution ' + '(default 20)') + ap.add_argument('--freq-uniform', action='store_true', + help='Use a uniform resolution across distributions') + ap.add_argument('--freq-series', action='store_true', + help='Consolidate frequency distribution histogram ' + 'as a single one') + + @staticmethod + def _add_log_args(ap, help=None): + if not help: + help = 'Output the events in chronological order' + + ap.add_argument('--log', action='store_true', help=help) + + @staticmethod + def _add_top_args(ap, help=None): + if not help: + help = 'Output the top results' + + ap.add_argument('--limit', type=int, default=10, + help='Limit to top X (default = 10)') + ap.add_argument('--top', action='store_true', help=help) + + @staticmethod + def _add_stats_args(ap, help=None): + if not help: + help = 'Output statistics' + + ap.add_argument('--stats', action='store_true', help=help) + + def _add_arguments(self, ap): + pass + def _process_date_args(self): - self._arg_multi_day = common.is_multi_day_trace_collection( + def date_to_epoch_nsec(date): + ts = common.date_to_epoch_nsec(self._handles, date, self._args.gmt) + if ts is None: + self._cmdline_error('Invalid date format: "{}"'.format(date)) + + return ts + + self._args.multi_day = common.is_multi_day_trace_collection( self._handles) - if self._arg_timerange: - (self._arg_begin, self._arg_end) = \ - common.extract_timerange(self._handles, self._arg_timerange, - self._arg_gmt) - if self._arg_begin is None or self._arg_end is None: - print('Invalid timeformat') - sys.exit(1) + begin_ts = None + end_ts = None + + if self._args.timerange: + begin_ts, end_ts = common.extract_timerange(self._handles, + self._args.timerange, + self._args.gmt) + if None in [begin_ts, end_ts]: + self._cmdline_error( + 'Invalid time format: "{}"'.format(self._args.timerange)) else: - if self._arg_begin: - self._arg_begin = common.date_to_epoch_nsec(self._handles, - self._arg_begin, - self._arg_gmt) - if 
self._arg_begin is None: - print('Invalid timeformat') - sys.exit(1) - if self._arg_end: - self._arg_end = common.date_to_epoch_nsec(self._handles, - self._arg_end, - self._arg_gmt) - if self._arg_end is None: - print('Invalid timeformat') - sys.exit(1) + if self._args.begin: + begin_ts = date_to_epoch_nsec(self._args.begin) + if self._args.end: + end_ts = date_to_epoch_nsec(self._args.end) # We have to check if timestamp_begin is None, which # it always is in older versions of babeltrace. In # that case, the test is simply skipped and an invalid # --end value will cause an empty analysis if self._traces.timestamp_begin is not None and \ - self._arg_end < self._traces.timestamp_begin: - print('--end timestamp before beginning of trace') - sys.exit(1) + end_ts < self._traces.timestamp_begin: + self._cmdline_error( + '--end timestamp before beginning of trace') + + self._analysis_conf.begin_ts = begin_ts + self._analysis_conf.end_ts = end_ts + + def _create_analysis(self): + notification_cbs = { + analysis.Analysis.TICK_CB: self._analysis_tick_cb + } + + self._analysis = self._ANALYSIS_CLASS(self.state, self._analysis_conf) + self._analysis.register_notification_cbs(notification_cbs) def _create_automaton(self): self._automaton = automaton.Automaton() self.state = self._automaton.state - def _filter_process(self, proc): - if self._arg_proc_list and proc.comm not in self._arg_proc_list: - return False - if self._arg_pid_list and proc.pid not in self._arg_pid_list: - return False - return True + def _analysis_tick_cb(self, **kwargs): + begin_ns = kwargs['begin_ns'] + end_ns = kwargs['end_ns'] + + self._analysis_tick(begin_ns, end_ns) + self._ticks += 1 + + def _analysis_tick(self, begin_ns, end_ns): + raise NotImplementedError() + + +# create MI version +_cmd_version = _version.get_versions()['version'] +_version_match = re.match(r'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version) +Command._MI_VERSION = version_utils.Version( + int(_version_match.group(1)), + 
int(_version_match.group(2)), + int(_version_match.group(3)), + _version_match.group(4), +) diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/cputop.py lttnganalyses-0.4.3/lttnganalyses/cli/cputop.py --- lttnganalyses-0.3.0/lttnganalyses/cli/cputop.py 2015-07-13 21:53:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/cputop.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,9 +1,8 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque +# 2015 - Philippe Proulx # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -23,110 +22,139 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +import operator +from ..common import format_utils from .command import Command from ..core import cputop -from ..ascii_graph import Pyasciigraph -import operator +from . import mi +from . import termgraph class Cputop(Command): _DESC = """The cputop command.""" - - def __init__(self): - super().__init__(self._add_arguments, enable_proc_filter_args=True) - - def _validate_transform_args(self): - pass - - def run(self): - # parse arguments first - self._parse_args() - # validate, transform and save specific arguments - self._validate_transform_args() - # open the trace - self._open_trace() - # create the appropriate analysis/analyses - self._create_analysis() - # run the analysis - self._run_analysis(self._reset_total, self._refresh) - # process the results - self._compute_stats() - # print results - self._print_results(self.start_ns, self.trace_end_ts) - # close the trace - self._close_trace() - - def _create_analysis(self): - self._analysis = cputop.Cputop(self.state) - - def _compute_stats(self): - self._analysis.compute_stats(self.start_ns, self.end_ns) - - def _reset_total(self, start_ts): - self._analysis.reset(start_ts) - - def _refresh(self, begin, end): - 
self._compute_stats() - self._print_results(begin, end) - self._reset_total(end) - - def _filter_process(self, proc): - # Exclude swapper - if proc.tid == 0: - return False - - if self._arg_proc_list and proc.comm not in self._arg_proc_list: - return False - - return True - - def _print_results(self, begin_ns, end_ns): - self._print_date(begin_ns, end_ns) - self._print_per_tid_usage() - self._print_per_cpu_usage() - self._print_total_cpu_usage() - - def _print_per_tid_usage(self): + _ANALYSIS_CLASS = cputop.Cputop + _MI_TITLE = 'Top CPU usage' + _MI_DESCRIPTION = 'Per-TID, per-CPU, and total top CPU usage' + _MI_TAGS = [mi.Tags.CPU, mi.Tags.TOP] + _MI_TABLE_CLASS_PER_PROC = 'per-process' + _MI_TABLE_CLASS_PER_CPU = 'per-cpu' + _MI_TABLE_CLASS_TOTAL = 'total' + _MI_TABLE_CLASS_SUMMARY = 'summary' + _MI_TABLE_CLASSES = [ + ( + _MI_TABLE_CLASS_PER_PROC, + 'Per-TID top CPU usage', [ + ('process', 'Process', mi.Process), + ('migrations', 'Migration count', mi.Integer, 'migrations'), + ('prio_list', 'Chronological priorities', mi.String), + ('usage', 'CPU usage', mi.Ratio), + ] + ), + ( + _MI_TABLE_CLASS_PER_CPU, + 'Per-CPU top CPU usage', [ + ('cpu', 'CPU', mi.Cpu), + ('usage', 'CPU usage', mi.Ratio), + ]), + ( + _MI_TABLE_CLASS_TOTAL, + 'Total CPU usage', [ + ('usage', 'CPU usage', mi.Ratio), + ] + ), + ( + _MI_TABLE_CLASS_SUMMARY, + 'CPU usage - summary', [ + ('time_range', 'Time range', mi.TimeRange), + ('usage', 'Total CPU usage', mi.Ratio), + ] + ), + ] + + def _analysis_tick(self, begin_ns, end_ns): + per_tid_table = self._get_per_tid_usage_result_table(begin_ns, end_ns) + per_cpu_table = self._get_per_cpu_usage_result_table(begin_ns, end_ns) + total_table = self._get_total_usage_result_table(begin_ns, end_ns) + + if self._mi_mode: + self._mi_append_result_table(per_tid_table) + self._mi_append_result_table(per_cpu_table) + self._mi_append_result_table(total_table) + else: + self._print_date(begin_ns, end_ns) + self._print_per_tid_usage(per_tid_table) + 
self._print_per_cpu_usage(per_cpu_table) + + if total_table: + self._print_total_cpu_usage(total_table) + + def _create_summary_result_tables(self): + total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL) + begin = total_tables[0].timerange.begin + end = total_tables[-1].timerange.end + summary_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY, + begin, end) + + for total_table in total_tables: + usage = total_table.rows[0].usage + summary_table.append_row( + time_range=total_table.timerange, + usage=usage, + ) + + self._mi_clear_result_tables() + self._mi_append_result_table(summary_table) + + def _get_per_tid_usage_result_table(self, begin_ns, end_ns): + result_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_PER_PROC, + begin_ns, end_ns) count = 0 - limit = self._arg_limit - graph = Pyasciigraph() - values = [] for tid in sorted(self._analysis.tids.values(), key=operator.attrgetter('usage_percent'), reverse=True): - if not self._filter_process(tid): - continue - - output_str = '%s (%d)' % (tid.comm, tid.tid) - if tid.migrate_count > 0: - output_str += ', %d migrations' % (tid.migrate_count) - - values.append((output_str, tid.usage_percent)) + prio_list = format_utils.format_prio_list(tid.prio_list) + result_table.append_row( + process=mi.Process(tid.comm, tid=tid.tid), + migrations=mi.Integer(tid.migrate_count), + prio_list=mi.String(prio_list), + usage=mi.Ratio.from_percentage(tid.usage_percent) + ) count += 1 - if limit > 0 and count >= limit: + + if self._args.limit > 0 and count >= self._args.limit: break - for line in graph.graph('Per-TID CPU Usage', values, unit=' %'): - print(line) + return result_table - def _print_per_cpu_usage(self): - graph = Pyasciigraph() - values = [] + def _get_per_cpu_usage_result_table(self, begin_ns, end_ns): + result_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_PER_CPU, + begin_ns, end_ns) for cpu in sorted(self._analysis.cpus.values(), - 
key=operator.attrgetter('usage_percent'), - reverse=True): - values.append(('CPU %d' % cpu.cpu_id, cpu.usage_percent)) - - for line in graph.graph('Per-CPU Usage', values, unit=' %'): - print(line) + key=operator.attrgetter('cpu_id')): + result_table.append_row( + cpu=mi.Cpu(cpu.cpu_id), + usage=mi.Ratio.from_percentage(cpu.usage_percent) + ) + + return result_table + + def _get_total_usage_result_table(self, begin_ns, end_ns): + result_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL, + begin_ns, end_ns) - def _print_total_cpu_usage(self): cpu_count = len(self.state.cpus) usage_percent = 0 + if not cpu_count: + return + for cpu in sorted(self._analysis.cpus.values(), key=operator.attrgetter('usage_percent'), reverse=True): @@ -134,17 +162,62 @@ # average per CPU usage_percent /= cpu_count + result_table.append_row( + usage=mi.Ratio.from_percentage(usage_percent), + ) + + return result_table + + def _print_per_tid_usage(self, result_table): + row_format = ' {:<25} {:>10} {}' + label_header = row_format.format('Process', 'Migrations', 'Priorities') + + def format_label(row): + return row_format.format( + '%s (%d)' % (row.process.name, row.process.tid), + row.migrations.value, + row.prio_list.value, + ) + + graph = termgraph.BarGraph( + title='Per-TID Usage', + unit='%', + get_value=lambda row: row.usage.to_percentage(), + get_label=format_label, + label_header=label_header, + data=result_table.rows + ) + + graph.print_graph() + + def _print_per_cpu_usage(self, result_table): + graph = termgraph.BarGraph( + title='Per-CPU Usage', + unit='%', + get_value=lambda row: row.usage.to_percentage(), + get_label=lambda row: 'CPU %d' % row.cpu.id, + data=result_table.rows + ) + + graph.print_graph() + + def _print_total_cpu_usage(self, result_table): + usage_percent = result_table.rows[0].usage.to_percentage() print('\nTotal CPU Usage: %0.02f%%\n' % usage_percent) def _add_arguments(self, ap): - # specific argument - pass + Command._add_proc_filter_args(ap) 
+ Command._add_top_args(ap) + + +def _run(mi_mode): + cputopcmd = Cputop(mi_mode=mi_mode) + cputopcmd.run() -# entry point def run(): - # create command - cputopcmd = Cputop() + _run(mi_mode=False) - # execute command - cputopcmd.run() + +def run_mi(): + _run(mi_mode=True) diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/__init__.py lttnganalyses-0.4.3/lttnganalyses/cli/__init__.py --- lttnganalyses-0.3.0/lttnganalyses/cli/__init__.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/__init__.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/io.py lttnganalyses-0.4.3/lttnganalyses/cli/io.py --- lttnganalyses-0.3.0/lttnganalyses/cli/io.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/io.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,9 +1,8 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque +# 2015 - Philippe Proulx # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -23,426 +22,706 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -from .command import Command -from ..core import io -from ..linuxautomaton import common -from ..ascii_graph import Pyasciigraph +import collections import operator import statistics +import sys +from . import mi +from . 
import termgraph +from ..core import io +from ..common import format_utils +from .command import Command +from ..linuxautomaton import common + + +_UsageTables = collections.namedtuple('_UsageTables', [ + 'per_proc_read', + 'per_proc_write', + 'per_file_read', + 'per_file_write', + 'per_proc_block_read', + 'per_proc_block_write', + 'per_disk_sector', + 'per_disk_request', + 'per_disk_rtps', + 'per_netif_recv', + 'per_netif_send', +]) class IoAnalysisCommand(Command): _DESC = """The I/O command.""" - + _ANALYSIS_CLASS = io.IoAnalysis + _MI_TITLE = 'I/O analysis' + _MI_DESCRIPTION = 'System call/disk latency statistics, system call ' + \ + 'latency distribution, system call top latencies, ' + \ + 'I/O usage top, and I/O operations log' + _MI_TAGS = [ + mi.Tags.IO, + mi.Tags.SYSCALL, + mi.Tags.STATS, + mi.Tags.FREQ, + mi.Tags.LOG, + mi.Tags.TOP, + ] + _MI_TABLE_CLASS_SYSCALL_LATENCY_STATS = 'syscall-latency-stats' + _MI_TABLE_CLASS_PART_LATENCY_STATS = 'disk-latency-stats' + _MI_TABLE_CLASS_FREQ = 'freq' + _MI_TABLE_CLASS_TOP_SYSCALL = 'top-syscall' + _MI_TABLE_CLASS_LOG = 'log' + _MI_TABLE_CLASS_PER_PROCESS_TOP = 'per-process-top' + _MI_TABLE_CLASS_PER_FILE_TOP = 'per-file-top' + _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK = 'per-process-top-block' + _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR = 'per-disk-top-sector' + _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST = 'per-disk-top-request' + _MI_TABLE_CLASS_PER_DISK_TOP_RTPS = 'per-disk-top-rps' + _MI_TABLE_CLASS_PER_NETIF_TOP = 'per-netif-top' + _MI_TABLE_CLASSES = [ + ( + _MI_TABLE_CLASS_SYSCALL_LATENCY_STATS, + 'System call latency statistics', [ + ('obj', 'System call category', mi.String), + ('count', 'Call count', mi.Integer, 'calls'), + ('min_latency', 'Minimum call latency', mi.Duration), + ('avg_latency', 'Average call latency', mi.Duration), + ('max_latency', 'Maximum call latency', mi.Duration), + ('stdev_latency', 'System call latency standard deviation', + mi.Duration), + ] + ), + ( + _MI_TABLE_CLASS_PART_LATENCY_STATS, + 
'Partition latency statistics', [ + ('obj', 'Partition', mi.Disk), + ('count', 'Access count', mi.Integer, 'accesses'), + ('min_latency', 'Minimum access latency', mi.Duration), + ('avg_latency', 'Average access latency', mi.Duration), + ('max_latency', 'Maximum access latency', mi.Duration), + ('stdev_latency', 'System access latency standard deviation', + mi.Duration), + ] + ), + ( + _MI_TABLE_CLASS_FREQ, + 'I/O request latency distribution', [ + ('latency_lower', 'Latency (lower bound)', mi.Duration), + ('latency_upper', 'Latency (upper bound)', mi.Duration), + ('count', 'Request count', mi.Integer, 'requests'), + ] + ), + ( + _MI_TABLE_CLASS_TOP_SYSCALL, + 'Top system call latencies', [ + ('time_range', 'Call time range', mi.TimeRange), + ('out_of_range', 'System call out of range?', mi.Boolean), + ('duration', 'Call duration', mi.Duration), + ('syscall', 'System call', mi.Syscall), + ('size', 'Read/write size', mi.Size), + ('process', 'Process', mi.Process), + ('path', 'File path', mi.Path), + ('fd', 'File descriptor', mi.Fd), + ] + ), + ( + _MI_TABLE_CLASS_LOG, + 'I/O operations log', [ + ('time_range', 'Call time range', mi.TimeRange), + ('out_of_range', 'System call out of range?', mi.Boolean), + ('duration', 'Call duration', mi.Duration), + ('syscall', 'System call', mi.Syscall), + ('size', 'Read/write size', mi.Size), + ('process', 'Process', mi.Process), + ('path', 'File path', mi.Path), + ('fd', 'File descriptor', mi.Fd), + ] + ), + ( + _MI_TABLE_CLASS_PER_PROCESS_TOP, + 'Per-process top I/O operations', [ + ('process', 'Process', mi.Process), + ('size', 'Total operations size', mi.Size), + ('disk_size', 'Disk operations size', mi.Size), + ('net_size', 'Network operations size', mi.Size), + ('unknown_size', 'Unknown operations size', mi.Size), + ] + ), + ( + _MI_TABLE_CLASS_PER_FILE_TOP, + 'Per-file top I/O operations', [ + ('path', 'File path/info', mi.Path), + ('size', 'Operations size', mi.Size), + ('fd_owners', 'File descriptor owners', mi.String), 
+ ] + ), + ( + _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, + 'Per-process top block I/O operations', [ + ('process', 'Process', mi.Process), + ('size', 'Operations size', mi.Size), + ] + ), + ( + _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR, + 'Per-disk top sector I/O operations', [ + ('disk', 'Disk', mi.Disk), + ('count', 'Sector count', mi.Integer, 'sectors'), + ] + ), + ( + _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST, + 'Per-disk top I/O requests', [ + ('disk', 'Disk', mi.Disk), + ('count', 'Request count', mi.Integer, 'I/O requests'), + ] + ), + ( + _MI_TABLE_CLASS_PER_DISK_TOP_RTPS, + 'Per-disk top I/O request time/sector', [ + ('disk', 'Disk', mi.Disk), + ('rtps', 'Request time/sector', mi.Duration), + ] + ), + ( + _MI_TABLE_CLASS_PER_NETIF_TOP, + 'Per-network interface top I/O operations', [ + ('netif', 'Network interface', mi.NetIf), + ('size', 'Operations size', mi.Size), + ] + ), + ] _LATENCY_STATS_FORMAT = '{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}' _SECTION_SEPARATOR_STRING = '-' * 89 - def __init__(self): - super().__init__(self._add_arguments, - enable_proc_filter_args=True, - enable_max_min_args=True, - enable_max_min_size_arg=True, - enable_log_arg=True) - - def _validate_transform_args(self): - self._arg_usage = self._args.usage - self._arg_stats = self._args.latencystats - self._arg_latencytop = self._args.latencytop - self._arg_freq = self._args.latencyfreq - self._arg_freq_resolution = self._args.freq_resolution - - def _default_args(self, stats, log, freq, usage, latencytop): - if stats: - self._arg_stats = True - if log: - self._arg_log = True - if freq: - self._arg_freq = True - if usage: - self._arg_usage = True - if latencytop: - self._arg_latencytop = True - - def run(self, stats=False, log=False, freq=False, usage=False, - latencytop=False): - # parse arguments first - self._parse_args() - # validate, transform and save specific arguments - self._validate_transform_args() - # handle the default args for different executables - self._default_args(stats, log, 
freq, usage, latencytop) - # open the trace - self._open_trace() - # create the appropriate analysis/analyses - self._create_analysis() - # run the analysis - self._run_analysis(self._reset_total, self._refresh) - # print results - self._print_results(self.start_ns, self.trace_end_ts) - # close the trace - self._close_trace() - - def run_stats(self): - self.run(stats=True) - - def run_latencytop(self): - self.run(latencytop=True) - - def run_log(self): - self.run(log=True) - - def run_freq(self): - self.run(freq=True) - - def run_usage(self): - self.run(usage=True) - - def _create_analysis(self): - self._analysis = io.IoAnalysis(self.state) - - def _refresh(self, begin, end): - self._print_results(begin, end) - self._reset_total(end) + def _analysis_tick(self, begin_ns, end_ns): + syscall_latency_stats_table = None + disk_latency_stats_table = None + freq_tables = None + top_tables = None + log_table = None + usage_tables = None + + if self._args.stats: + syscall_latency_stats_table, disk_latency_stats_table = \ + self._get_latency_stats_result_tables(begin_ns, end_ns) + + if self._args.freq: + freq_tables = self._get_freq_result_tables(begin_ns, end_ns) + + if self._args.usage: + usage_tables = self._get_usage_result_tables(begin_ns, end_ns) + + if self._args.top: + top_tables = self._get_top_result_tables(begin_ns, end_ns) + + if self._args.log: + log_table = self._get_log_result_table(begin_ns, end_ns) + + if self._mi_mode: + self._mi_append_result_tables([ + log_table, + syscall_latency_stats_table, + disk_latency_stats_table, + ]) + self._mi_append_result_tables(top_tables) + self._mi_append_result_tables(usage_tables) + self._mi_append_result_tables(freq_tables) + else: + self._print_date(begin_ns, end_ns) + + if self._args.usage: + self._print_usage(usage_tables) + + if self._args.stats: + self._print_latency_stats(syscall_latency_stats_table, + disk_latency_stats_table) + + if self._args.top: + self._print_top(top_tables) + + if self._args.freq: + 
self._print_freq(freq_tables) + + if self._args.log: + self._print_log(log_table) + + def _create_summary_result_tables(self): + # TODO: create a summary table here + self._mi_clear_result_tables() # Filter predicates def _filter_size(self, size): if size is None: return True - if self._arg_maxsize is not None and size > self._arg_maxsize: + if self._args.maxsize is not None and size > self._args.maxsize: return False - if self._arg_minsize is not None and size < self._arg_minsize: + if self._args.minsize is not None and size < self._args.minsize: return False return True def _filter_latency(self, duration): - if self._arg_max is not None and (duration/1000) > self._arg_max: + if self._args.max is not None and duration > self._args.max: return False - if self._arg_min is not None and (duration/1000) < self._arg_min: + if self._args.min is not None and duration < self._args.min: return False return True def _filter_time_range(self, begin, end): - return not (self._arg_begin and self._arg_end and end and - begin > self._arg_end) + # Note: we only want to return False only when a request has + # ended and is completely outside the timerange (i.e. begun + # after the end of the time range). 
+ return not (self._args.begin and self._args.end and end and + begin > self._args.end) def _filter_io_request(self, io_rq): - proc = self._analysis.tids[io_rq.tid] - return self._filter_process(proc) and \ - self._filter_size(io_rq.size) and \ + return self._filter_size(io_rq.size) and \ self._filter_latency(io_rq.duration) and \ self._filter_time_range(io_rq.begin_ts, io_rq.end_ts) def _is_io_rq_out_of_range(self, io_rq): - return self._arg_begin and io_rq.begin_ts < self._arg_begin or \ - self._arg_end and io_rq.end_ts > self._arg_end - - def _print_ascii_graph(self, input_list, get_datum_cb, graph_label, - graph_args=None): - """Print an ascii graph for given data - - This method wraps the ascii_graph module and facilitates the - printing of a graph with a limited number of lines. - - Args: - input_list (list): A list of objects from which the data - for the graph will be generated. - - get_datum_cb (function): function that takes a single - object from the input list as an argument, and returns a - datum tuple for the graph, of the form (string, int). The - string element is printed as is in the graph, and the int - is the numeric value corresponding to this graph entry. - - graph_label (string): Label used to identify the printed - graph. - - graph_args (dict, optional): Dict of keyword args to be - passed to the graph() function as is. 
- """ - count = 0 - limit = self._arg_limit - graph = Pyasciigraph() - data = [] - if graph_args is None: - graph_args = {} - - for elem in input_list: - datum = get_datum_cb(elem) - if datum is not None: - data.append(datum) - count += 1 - if limit is not None and count >= limit: - break + return self._args.begin and io_rq.begin_ts < self._args.begin or \ + self._args.end and io_rq.end_ts > self._args.end - for line in graph.graph(graph_label, data, **graph_args): - print(line) - - # I/O Top output methods - def _get_read_datum(self, proc_stats): - if not self._filter_process(proc_stats): - return None + def _append_per_proc_read_usage_row(self, proc_stats, result_table): + result_table.append_row( + process=mi.Process(proc_stats.comm, pid=proc_stats.pid, + tid=proc_stats.tid), + size=mi.Size(proc_stats.total_read), + disk_size=mi.Size(proc_stats.disk_io.read), + net_size=mi.Size(proc_stats.net_io.read), + unknown_size=mi.Size(proc_stats.unk_io.read), + ) - if proc_stats.pid is None: - pid_str = 'unknown (tid=%d)' % (proc_stats.tid) - else: - pid_str = str(proc_stats.pid) + return True - format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown' - output_str = format_str.format( - common.convert_size(proc_stats.total_read, padding_after=True), - '%s (%s)' % (proc_stats.comm, pid_str), - common.convert_size(proc_stats.disk_read, padding_after=True), - common.convert_size(proc_stats.net_read, padding_after=True), - common.convert_size(proc_stats.unk_read, padding_after=True)) + def _append_per_proc_write_usage_row(self, proc_stats, result_table): + result_table.append_row( + process=mi.Process(proc_stats.comm, pid=proc_stats.pid, + tid=proc_stats.tid), + size=mi.Size(proc_stats.total_write), + disk_size=mi.Size(proc_stats.disk_io.write), + net_size=mi.Size(proc_stats.net_io.write), + unknown_size=mi.Size(proc_stats.unk_io.write), + ) - return (output_str, proc_stats.total_read) + return True - def _get_write_datum(self, proc_stats): - if not 
self._filter_process(proc_stats): - return None + def _append_per_proc_block_read_usage_row(self, proc_stats, result_table): + if proc_stats.block_io.read == 0: + return False - if proc_stats.pid is None: - pid_str = 'unknown (tid=%d)' % (proc_stats.tid) + if proc_stats.comm: + proc_name = proc_stats.comm else: - pid_str = str(proc_stats.pid) + proc_name = None - format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown' - output_str = format_str.format( - common.convert_size(proc_stats.total_write, padding_after=True), - '%s (%s)' % (proc_stats.comm, pid_str), - common.convert_size(proc_stats.disk_write, padding_after=True), - common.convert_size(proc_stats.net_write, padding_after=True), - common.convert_size(proc_stats.unk_write, padding_after=True)) - - return (output_str, proc_stats.total_write) + result_table.append_row( + process=mi.Process(proc_name, pid=proc_stats.pid, + tid=proc_stats.tid), + size=mi.Size(proc_stats.block_io.read), + ) - def _get_block_read_datum(self, proc_stats): - if not self._filter_process(proc_stats) or proc_stats.block_read == 0: - return None + return True - comm = proc_stats.comm - if not comm: - comm = 'unknown' + def _append_per_proc_block_write_usage_row(self, proc_stats, result_table): + if proc_stats.block_io.write == 0: + return False - if proc_stats.pid is None: - pid_str = 'unknown (tid=%d)' % (proc_stats.tid) + if proc_stats.comm: + proc_name = proc_stats.comm else: - pid_str = str(proc_stats.pid) + proc_name = None - format_str = '{:>10} {:<22}' - output_str = format_str.format( - common.convert_size(proc_stats.block_read, padding_after=True), - '%s (pid=%s)' % (comm, pid_str)) + result_table.append_row( + process=mi.Process(proc_name, pid=proc_stats.pid, + tid=proc_stats.tid), + size=mi.Size(proc_stats.block_io.write), + ) - return (output_str, proc_stats.block_read) + return True - def _get_block_write_datum(self, proc_stats): - if not self._filter_process(proc_stats) or \ - proc_stats.block_write == 0: + def 
_append_disk_sector_usage_row(self, disk_stats, result_table): + if disk_stats.total_rq_sectors == 0: return None - comm = proc_stats.comm - if not comm: - comm = 'unknown' + result_table.append_row( + disk=mi.Disk(disk_stats.disk_name), + count=mi.Integer(disk_stats.total_rq_sectors), + ) - if proc_stats.pid is None: - pid_str = 'unknown (tid=%d)' % (proc_stats.tid) - else: - pid_str = str(proc_stats.pid) + return True - format_str = '{:>10} {:<22}' - output_str = format_str.format( - common.convert_size(proc_stats.block_write, padding_after=True), - '%s (pid=%s)' % (comm, pid_str)) + def _append_disk_request_usage_row(self, disk_stats, result_table): + if disk_stats.rq_count == 0: + return False - return (output_str, proc_stats.block_write) + result_table.append_row( + disk=mi.Disk(disk_stats.disk_name), + count=mi.Integer(disk_stats.rq_count), + ) - def _get_total_rq_sectors_datum(self, disk): - if disk.total_rq_sectors == 0: - return None + return True - return (disk.disk_name, disk.total_rq_sectors) + def _append_disk_rtps_usage_row(self, disk_stats, result_table): + if disk_stats.rq_count == 0: + return False - def _get_rq_count_datum(self, disk): - if disk.rq_count == 0: - return None + avg_latency = (disk_stats.total_rq_duration / disk_stats.rq_count) + result_table.append_row( + disk=mi.Disk(disk_stats.disk_name), + rtps=mi.Duration(avg_latency), + ) + + return True - return (disk.disk_name, disk.rq_count) + def _append_netif_recv_usage_row(self, netif_stats, result_table): + result_table.append_row( + netif=mi.NetIf(netif_stats.name), + size=mi.Size(netif_stats.recv_bytes) + ) - def _get_avg_disk_latency_datum(self, disk): - if disk.rq_count == 0: - return None + return True - avg_latency = ((disk.total_rq_duration / disk.rq_count) / - common.MSEC_PER_NSEC) - avg_latency = round(avg_latency, 3) - - return ('%s' % disk.disk_name, avg_latency) - - def _get_net_recv_bytes_datum(self, iface): - return ('%s %s' % (common.convert_size(iface.recv_bytes), 
iface.name), - iface.recv_bytes) - - def _get_net_sent_bytes_datum(self, iface): - return ('%s %s' % (common.convert_size(iface.sent_bytes), iface.name), - iface.sent_bytes) + def _append_netif_send_usage_row(self, netif_stats, result_table): + result_table.append_row( + netif=mi.NetIf(netif_stats.name), + size=mi.Size(netif_stats.sent_bytes) + ) - def _get_file_read_datum(self, file_stats): - if file_stats.read == 0: - return None + return True + def _get_file_stats_fd_owners_str(self, file_stats): fd_by_pid_str = '' + for pid, fd in file_stats.fd_by_pid.items(): comm = self._analysis.tids[pid].comm fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid) - format_str = '{:>10} {} {}' - output_str = format_str.format( - common.convert_size(file_stats.read, padding_after=True), - file_stats.filename, - fd_by_pid_str) + return fd_by_pid_str - return (output_str, file_stats.read) + def _append_file_read_usage_row(self, file_stats, result_table): + if file_stats.io.read == 0: + return False - def _get_file_write_datum(self, file_stats): - if file_stats.write == 0: - return None + fd_owners = self._get_file_stats_fd_owners_str(file_stats) + result_table.append_row( + path=mi.Path(file_stats.filename), + size=mi.Size(file_stats.io.read), + fd_owners=mi.String(fd_owners), + ) - fd_by_pid_str = '' - for pid, fd in file_stats.fd_by_pid.items(): - comm = self._analysis.tids[pid].comm - fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid) + return True + + def _append_file_write_usage_row(self, file_stats, result_table): + if file_stats.io.write == 0: + return False - format_str = '{:>10} {} {}' - output_str = format_str.format( - common.convert_size(file_stats.write, padding_after=True), - file_stats.filename, - fd_by_pid_str) + fd_owners = self._get_file_stats_fd_owners_str(file_stats) + result_table.append_row( + path=mi.Path(file_stats.filename), + size=mi.Size(file_stats.io.write), + fd_owners=mi.String(fd_owners), + ) - return (output_str, file_stats.write) + return 
True + + def _fill_usage_result_table(self, input_list, append_row_cb, + result_table): + count = 0 + limit = self._args.limit - def _output_read(self): + for elem in input_list: + if append_row_cb(elem, result_table): + count += 1 + + if limit is not None and count >= limit: + break + + def _fill_per_process_read_usage_result_table(self, result_table): input_list = sorted(self._analysis.tids.values(), key=operator.attrgetter('total_read'), reverse=True) - label = 'Per-process I/O Read' - graph_args = {'with_value': False} - self._print_ascii_graph(input_list, self._get_read_datum, label, - graph_args) + self._fill_usage_result_table(input_list, + self._append_per_proc_read_usage_row, + result_table) - def _output_write(self): + def _fill_per_process_write_usage_result_table(self, result_table): input_list = sorted(self._analysis.tids.values(), key=operator.attrgetter('total_write'), reverse=True) - label = 'Per-process I/O Write' - graph_args = {'with_value': False} - self._print_ascii_graph(input_list, self._get_write_datum, label, - graph_args) + self._fill_usage_result_table(input_list, + self._append_per_proc_write_usage_row, + result_table) - def _output_block_read(self): + def _fill_per_process_block_read_usage_result_table(self, result_table): input_list = sorted(self._analysis.tids.values(), - key=operator.attrgetter('block_read'), + key=operator.attrgetter('block_io.read'), reverse=True) - label = 'Block I/O Read' - graph_args = {'with_value': False} - self._print_ascii_graph(input_list, self._get_block_read_datum, - label, graph_args) + self._fill_usage_result_table( + input_list, self._append_per_proc_block_read_usage_row, + result_table) - def _output_block_write(self): + def _fill_per_process_block_write_usage_result_table(self, result_table): input_list = sorted(self._analysis.tids.values(), - key=operator.attrgetter('block_write'), + key=operator.attrgetter('block_io.write'), reverse=True) - label = 'Block I/O Write' - graph_args = {'with_value': 
False} - self._print_ascii_graph(input_list, self._get_block_write_datum, - label, graph_args) + self._fill_usage_result_table( + input_list, self._append_per_proc_block_write_usage_row, + result_table) - def _output_total_rq_sectors(self): + def _fill_disk_sector_usage_result_table(self, result_table): input_list = sorted(self._analysis.disks.values(), key=operator.attrgetter('total_rq_sectors'), reverse=True) - label = 'Disk requests sector count' - graph_args = {'unit': ' sectors'} - self._print_ascii_graph(input_list, self._get_total_rq_sectors_datum, - label, graph_args) + self._fill_usage_result_table(input_list, + self._append_disk_sector_usage_row, + result_table) - def _output_rq_count(self): + def _fill_disk_request_usage_result_table(self, result_table): input_list = sorted(self._analysis.disks.values(), key=operator.attrgetter('rq_count'), reverse=True) - label = 'Disk request count' - graph_args = {'unit': ' requests'} - self._print_ascii_graph(input_list, self._get_rq_count_datum, - label, graph_args) + self._fill_usage_result_table(input_list, + self._append_disk_request_usage_row, + result_table) - def _output_avg_disk_latency(self): + def _fill_disk_rtps_usage_result_table(self, result_table): input_list = self._analysis.disks.values() - label = 'Disk request average latency' - graph_args = {'unit': ' ms', 'sort': 2} - self._print_ascii_graph(input_list, self._get_avg_disk_latency_datum, - label, graph_args) + self._fill_usage_result_table(input_list, + self._append_disk_rtps_usage_row, + result_table) - def _output_net_recv_bytes(self): + def _fill_netif_recv_usage_result_table(self, result_table): input_list = sorted(self._analysis.ifaces.values(), key=operator.attrgetter('recv_bytes'), reverse=True) - label = 'Network received bytes' - graph_args = {'with_value': False} - self._print_ascii_graph(input_list, self._get_net_recv_bytes_datum, - label, graph_args) + self._fill_usage_result_table(input_list, + self._append_netif_recv_usage_row, + 
result_table) - def _output_net_sent_bytes(self): + def _fill_netif_send_usage_result_table(self, result_table): input_list = sorted(self._analysis.ifaces.values(), key=operator.attrgetter('sent_bytes'), reverse=True) - label = 'Network sent bytes' - graph_args = {'with_value': False} - self._print_ascii_graph(input_list, self._get_net_sent_bytes_datum, - label, graph_args) + self._fill_usage_result_table(input_list, + self._append_netif_send_usage_row, + result_table) - def _output_file_read(self, files): + def _fill_file_read_usage_result_table(self, files, result_table): input_list = sorted(files.values(), - key=lambda file_stats: file_stats.read, + key=lambda file_stats: file_stats.io.read, reverse=True) - label = 'Files read' - graph_args = {'with_value': False, 'sort': 2} - self._print_ascii_graph(input_list, self._get_file_read_datum, - label, graph_args) + self._fill_usage_result_table(input_list, + self._append_file_read_usage_row, + result_table) - def _output_file_write(self, files): + def _fill_file_write_usage_result_table(self, files, result_table): input_list = sorted(files.values(), - key=lambda file_stats: file_stats.write, + key=lambda file_stats: file_stats.io.write, reverse=True) - label = 'Files write' - graph_args = {'with_value': False, 'sort': 2} - self._print_ascii_graph(input_list, self._get_file_write_datum, - label, graph_args) - - def _output_file_read_write(self): - files = self._analysis.get_files_stats(self._arg_pid_list, - self._arg_proc_list) - self._output_file_read(files) - self._output_file_write(files) - - def iotop_output(self): - self._output_read() - self._output_write() - self._output_file_read_write() - self._output_block_read() - self._output_block_write() - self._output_total_rq_sectors() - self._output_rq_count() - self._output_avg_disk_latency() - self._output_net_recv_bytes() - self._output_net_sent_bytes() + self._fill_usage_result_table(input_list, + self._append_file_write_usage_row, + result_table) + + def 
_fill_file_usage_result_tables(self, read_table, write_table): + files = self._analysis.get_files_stats() + self._fill_file_read_usage_result_table(files, read_table) + self._fill_file_write_usage_result_table(files, write_table) + + def _get_usage_result_tables(self, begin, end): + # create result tables + per_proc_read_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_PROCESS_TOP, begin, end, 'read') + per_proc_write_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_PROCESS_TOP, begin, end, 'written') + per_file_read_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_FILE_TOP, begin, end, 'read') + per_file_write_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_FILE_TOP, begin, end, 'written') + per_proc_block_read_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, begin, end, 'read') + per_proc_block_write_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, begin, end, 'written') + per_disk_sector_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_DISK_TOP_SECTOR, begin, end) + per_disk_request_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_DISK_TOP_REQUEST, begin, end) + per_disk_rtps_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_DISK_TOP_RTPS, begin, end) + per_netif_recv_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_NETIF_TOP, begin, end, 'received') + per_netif_send_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_NETIF_TOP, begin, end, 'sent') + + # fill result tables + self._fill_per_process_read_usage_result_table(per_proc_read_table) + self._fill_per_process_write_usage_result_table(per_proc_write_table) + self._fill_file_usage_result_tables(per_file_read_table, + per_file_write_table) + self._fill_per_process_block_read_usage_result_table( + per_proc_block_read_table) + self._fill_per_process_block_write_usage_result_table( + 
per_proc_block_write_table) + self._fill_disk_sector_usage_result_table(per_disk_sector_table) + self._fill_disk_request_usage_result_table(per_disk_request_table) + self._fill_disk_rtps_usage_result_table(per_disk_rtps_table) + self._fill_netif_recv_usage_result_table(per_netif_recv_table) + self._fill_netif_send_usage_result_table(per_netif_send_table) + + return _UsageTables( + per_proc_read=per_proc_read_table, + per_proc_write=per_proc_write_table, + per_file_read=per_file_read_table, + per_file_write=per_file_write_table, + per_proc_block_read=per_proc_block_read_table, + per_proc_block_write=per_proc_block_write_table, + per_disk_sector=per_disk_sector_table, + per_disk_request=per_disk_request_table, + per_disk_rtps=per_disk_rtps_table, + per_netif_recv=per_netif_recv_table, + per_netif_send=per_netif_send_table, + ) + + def _print_per_proc_io(self, result_table, title): + header_format = '{:<25} {:<10} {:<10} {:<10}' + label_header = header_format.format( + 'Process', 'Disk', 'Net', 'Unknown' + ) + + def get_label(row): + label_format = '{:<25} {:>10} {:>10} {:>10}' + if row.process.pid is None: + pid_str = 'unknown (tid=%d)' % (row.process.tid) + else: + pid_str = str(row.process.pid) + + label = label_format.format( + '%s (%s)' % (row.process.name, pid_str), + format_utils.format_size(row.disk_size.value), + format_utils.format_size(row.net_size.value), + format_utils.format_size(row.unknown_size.value) + ) + + return label + + graph = termgraph.BarGraph( + title='Per-process I/O ' + title, + label_header=label_header, + get_value=lambda row: row.size.value, + get_value_str=format_utils.format_size, + get_label=get_label, + data=result_table.rows + ) + + graph.print_graph() - # I/O Latency frequency output methods - def _print_frequency_distribution(self, duration_list, title): + def _print_per_proc_block_io(self, result_table, title): + def get_label(row): + proc_name = row.process.name + + if not proc_name: + proc_name = 'unknown' + + if 
row.process.pid is None: + pid_str = 'unknown (tid={})'.format(row.process.tid) + else: + pid_str = str(row.process.pid) + + return '{} (pid={})'.format(proc_name, pid_str) + + graph = termgraph.BarGraph( + title='Block I/O ' + title, + label_header='Process', + get_value=lambda row: row.size.value, + get_value_str=format_utils.format_size, + get_label=get_label, + data=result_table.rows + ) + + graph.print_graph() + + def _print_per_disk_sector(self, result_table): + graph = termgraph.BarGraph( + title='Disk Requests Sector Count', + label_header='Disk', + unit='sectors', + get_value=lambda row: row.count.value, + get_label=lambda row: row.disk.name, + data=result_table.rows + ) + + graph.print_graph() + + def _print_per_disk_request(self, result_table): + graph = termgraph.BarGraph( + title='Disk Request Count', + label_header='Disk', + unit='requests', + get_value=lambda row: row.count.value, + get_label=lambda row: row.disk.name, + data=result_table.rows + ) + + graph.print_graph() + + def _print_per_disk_rtps(self, result_table): + graph = termgraph.BarGraph( + title='Disk Request Average Latency', + label_header='Disk', + unit='ms', + get_value=lambda row: row.rtps.value / common.NSEC_PER_MSEC, + get_label=lambda row: row.disk.name, + data=result_table.rows + ) + + graph.print_graph() + + def _print_per_netif_io(self, result_table, title): + graph = termgraph.BarGraph( + title='Network ' + title + ' Bytes', + label_header='Interface', + get_value=lambda row: row.size.value, + get_value_str=format_utils.format_size, + get_label=lambda row: row.netif.name, + data=result_table.rows + ) + + graph.print_graph() + + def _print_per_file_io(self, result_table, title): + # FIXME add option to show FD owners + # FIXME why are read and write values the same? 
+ graph = termgraph.BarGraph( + title='Per-file I/O ' + title, + label_header='Path', + get_value=lambda row: row.size.value, + get_value_str=format_utils.format_size, + get_label=lambda row: row.path.path, + data=result_table.rows + ) + + graph.print_graph() + + def _print_usage(self, usage_tables): + self._print_per_proc_io(usage_tables.per_proc_read, 'Read') + self._print_per_proc_io(usage_tables.per_proc_write, 'Write') + self._print_per_file_io(usage_tables.per_file_read, 'Read') + self._print_per_file_io(usage_tables.per_file_write, 'Write') + self._print_per_proc_block_io(usage_tables.per_proc_block_read, 'Read') + self._print_per_proc_block_io( + usage_tables.per_proc_block_write, 'Write' + ) + self._print_per_disk_sector(usage_tables.per_disk_sector) + self._print_per_disk_request(usage_tables.per_disk_request) + self._print_per_disk_rtps(usage_tables.per_disk_rtps) + self._print_per_netif_io(usage_tables.per_netif_recv, 'Received') + self._print_per_netif_io(usage_tables.per_netif_send, 'Sent') + + def _fill_freq_result_table(self, duration_list, result_table): if not duration_list: return # The number of bins for the histogram - resolution = self._arg_freq_resolution + resolution = self._args.freq_resolution min_duration = min(duration_list) max_duration = max(duration_list) @@ -451,233 +730,356 @@ max_duration /= 1000 step = (max_duration - min_duration) / resolution + if step == 0: return buckets = [] values = [] - graph = Pyasciigraph() + for i in range(resolution): buckets.append(i * step) values.append(0) + for duration in duration_list: duration /= 1000 index = min(int((duration - min_duration) / step), resolution - 1) values[index] += 1 - graph_data = [] for index, value in enumerate(values): - # The graph data format is a tuple (info, value). 
Here info - # is the lower bound of the bucket, value the bucket's count - graph_data.append(('%0.03f' % (index * step + min_duration), - value)) - - graph_lines = graph.graph( - title, - graph_data, - info_before=True, - count=True - ) + result_table.append_row( + latency_lower=mi.Duration.from_us(index * step + min_duration), + latency_upper=mi.Duration.from_us((index + 1) * step + + min_duration), + count=mi.Integer(value), + ) - for line in graph_lines: - print(line) + def _get_disk_freq_result_tables(self, begin, end): + result_tables = [] - print() - - def _output_disk_latency_freq(self): for disk in self._analysis.disks.values(): - rq_durations = [rq.duration for rq in disk.rq_list] - self._print_frequency_distribution( - rq_durations, - 'Frequency distribution for disk %s (usec)' % (disk.disk_name)) - - def iolatency_output(self): - self._output_disk_latency_freq() - - def iolatency_syscalls_output(self): - print() - self._print_frequency_distribution([io_rq.duration for io_rq in - self._analysis.open_io_requests if - self._filter_io_request(io_rq)], - 'Open latency distribution (usec)') - self._print_frequency_distribution([io_rq.duration for io_rq in - self._analysis.read_io_requests if - self._filter_io_request(io_rq)], - 'Read latency distribution (usec)') - self._print_frequency_distribution([io_rq.duration for io_rq in - self._analysis.write_io_requests if - self._filter_io_request(io_rq)], - 'Write latency distribution (usec)') - self._print_frequency_distribution([io_rq.duration for io_rq in - self._analysis.sync_io_requests if - self._filter_io_request(io_rq)], - 'Sync latency distribution (usec)') + rq_durations = [rq.duration for rq in disk.rq_list if + self._filter_io_request(rq)] + subtitle = 'disk: {}'.format(disk.disk_name) + result_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, + begin, end, subtitle) + self._fill_freq_result_table(rq_durations, result_table) + result_tables.append(result_table) + + return result_tables 
+ + def _get_syscall_freq_result_tables(self, begin, end): + open_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, + begin, end, 'open') + read_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, + begin, end, 'read') + write_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, + begin, end, 'write') + sync_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, + begin, end, 'sync') + self._fill_freq_result_table([io_rq.duration for io_rq in + self._analysis.open_io_requests if + self._filter_io_request(io_rq)], + open_table) + self._fill_freq_result_table([io_rq.duration for io_rq in + self._analysis.read_io_requests if + self._filter_io_request(io_rq)], + read_table) + self._fill_freq_result_table([io_rq.duration for io_rq in + self._analysis.write_io_requests if + self._filter_io_request(io_rq)], + write_table) + self._fill_freq_result_table([io_rq.duration for io_rq in + self._analysis.sync_io_requests if + self._filter_io_request(io_rq)], + sync_table) + + return [open_table, read_table, write_table, sync_table] + + def _get_freq_result_tables(self, begin, end): + syscall_tables = self._get_syscall_freq_result_tables(begin, end) + disk_tables = self._get_disk_freq_result_tables(begin, end) + + return syscall_tables + disk_tables + + def _print_one_freq(self, result_table): + graph = termgraph.FreqGraph( + data=result_table.rows, + get_value=lambda row: row.count.value, + get_lower_bound=lambda row: row.latency_lower.to_us(), + title='{} {}'.format(result_table.title, result_table.subtitle), + unit='µs' + ) - # I/O latency top and log output methods - def _output_io_request(self, io_rq): - fmt = '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}' + graph.print_graph() - begin_time = common.ns_to_hour_nsec(io_rq.begin_ts, - self._arg_multi_day, - self._arg_gmt) - end_time = common.ns_to_hour_nsec(io_rq.end_ts, - self._arg_multi_day, - self._arg_gmt) - time_range_str = '[' + begin_time + ',' + end_time 
+ ']' - duration_str = '%0.03f' % (io_rq.duration / 1000) + def _print_freq(self, freq_tables): + for freq_table in freq_tables: + self._print_one_freq(freq_table) + def _append_log_row(self, io_rq, result_table): if io_rq.size is None: - size = 'N/A' + size = mi.Empty() else: - size = common.convert_size(io_rq.size) + size = mi.Size(io_rq.size) tid = io_rq.tid proc_stats = self._analysis.tids[tid] - comm = proc_stats.comm + proc_name = proc_stats.comm # TODO: handle fd_in/fd_out for RW type operations if io_rq.fd is None: - file_str = 'N/A' + path = mi.Empty() + fd = mi.Empty() else: - fd = io_rq.fd - + fd = mi.Fd(io_rq.fd) parent_proc = proc_stats + if parent_proc.pid is not None: parent_proc = self._analysis.tids[parent_proc.pid] - fd_stats = parent_proc.get_fd(fd, io_rq.end_ts) + fd_stats = parent_proc.get_fd(io_rq.fd, io_rq.end_ts) + if fd_stats is not None: - filename = fd_stats.filename + path = mi.Path(fd_stats.filename) + else: + path = mi.Unknown() + + result_table.append_row( + time_range=mi.TimeRange(io_rq.begin_ts, io_rq.end_ts), + out_of_range=mi.Boolean(self._is_io_rq_out_of_range(io_rq)), + duration=mi.Duration(io_rq.duration), + syscall=mi.Syscall(io_rq.syscall_name), + size=size, + process=mi.Process(proc_name, tid=tid), + path=path, + fd=fd, + ) + + def _fill_log_result_table(self, rq_list, sort_key, is_top, result_table): + if not rq_list: + return + + count = 0 + + for io_rq in sorted(rq_list, key=operator.attrgetter(sort_key), + reverse=is_top): + if is_top and count > self._args.limit: + break + + self._append_log_row(io_rq, result_table) + count += 1 + + def _fill_log_result_table_from_io_requests(self, io_requests, sort_key, + is_top, result_table): + io_requests = [io_rq for io_rq in io_requests if + self._filter_io_request(io_rq)] + self._fill_log_result_table(io_requests, sort_key, is_top, + result_table) + + def _get_top_result_tables(self, begin, end): + open_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL, + 
begin, end, 'open') + read_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL, + begin, end, 'read') + write_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL, + begin, end, 'write') + sync_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL, + begin, end, 'sync') + self._fill_log_result_table_from_io_requests( + self._analysis.open_io_requests, 'duration', True, open_table) + self._fill_log_result_table_from_io_requests( + self._analysis.read_io_requests, 'duration', True, read_table) + self._fill_log_result_table_from_io_requests( + self._analysis.write_io_requests, 'duration', True, write_table) + self._fill_log_result_table_from_io_requests( + self._analysis.sync_io_requests, 'duration', True, sync_table) + + return [open_table, read_table, write_table, sync_table] + + def _print_log_row(self, row): + fmt = '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}' + begin_time = common.ns_to_hour_nsec(row.time_range.begin, + self._args.multi_day, + self._args.gmt) + end_time = common.ns_to_hour_nsec(row.time_range.end, + self._args.multi_day, + self._args.gmt) + time_range_str = '[' + begin_time + ',' + end_time + ']' + duration_str = '%0.03f' % row.duration.to_us() + + if type(row.size) is mi.Empty: + size = 'N/A' + else: + size = format_utils.format_size(row.size.value) + + tid = row.process.tid + proc_name = row.process.name + + if type(row.fd) is mi.Empty: + file_str = 'N/A' + else: + if type(row.path) is mi.Unknown: + path = 'unknown' else: - filename = 'unknown' + path = row.path.path - file_str = '%s (fd=%s)' % (filename, fd) + file_str = '%s (fd=%s)' % (path, row.fd.fd) - if self._is_io_rq_out_of_range(io_rq): + if row.out_of_range.value: time_range_str += '*' duration_str += '*' else: time_range_str += ' ' duration_str += ' ' - print(fmt.format(time_range_str, io_rq.syscall_name, duration_str, - size, comm, tid, file_str)) + print(fmt.format(time_range_str, row.syscall.name, duration_str, + 
size, proc_name, tid, file_str)) - def _output_io_requests_list(self, rq_list, title, sort_key, is_top=False): - if not rq_list: + def _print_log(self, result_table): + if not result_table.rows: return - count = 0 has_out_of_range_rq = False print() - print(title) - + fmt = '{} {} (usec)' + print(fmt.format(result_table.title, result_table.subtitle)) header_fmt = '{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} {:<14}' print(header_fmt.format( 'Begin', 'End', 'Name', 'Duration (usec)', 'Size', 'Proc', 'PID', 'Filename')) - for io_rq in sorted(rq_list, key=operator.attrgetter(sort_key), - reverse=is_top): - if is_top and count > self._arg_limit: - break + for row in result_table.rows: + self._print_log_row(row) - self._output_io_request(io_rq) - if not has_out_of_range_rq and self._is_io_rq_out_of_range(io_rq): + if not has_out_of_range_rq and row.out_of_range.value: has_out_of_range_rq = True - count += 1 - if has_out_of_range_rq: print('*: Syscalls started and/or completed outside of the ' 'range specified') - def _output_latency_log_from_requests(self, io_requests, title, sort_key, - is_top=False): - io_requests = [io_rq for io_rq in io_requests if - self._filter_io_request(io_rq)] - self._output_io_requests_list(io_requests, title, sort_key, is_top) + def _print_top(self, top_tables): + for table in top_tables: + self._print_log(table) + + def _get_log_result_table(self, begin, end): + log_table = self._mi_create_result_table(self._MI_TABLE_CLASS_LOG, + begin, end) + self._fill_log_result_table_from_io_requests( + self._analysis.io_requests, 'begin_ts', False, log_table) + + return log_table + + def _append_latency_stats_row(self, obj, rq_durations, result_table): + rq_count = len(rq_durations) + total_duration = sum(rq_durations) + + if len(rq_durations) > 0: + min_duration = min(rq_durations) + max_duration = max(rq_durations) + else: + min_duration = 0 + max_duration = 0 - def iolatency_syscalls_top_output(self): - self._output_latency_log_from_requests( - 
[io_rq for io_rq in self._analysis.open_io_requests if - self._filter_io_request(io_rq)], - 'Top open syscall latencies (usec)', - 'duration', is_top=True) - self._output_io_requests_list( - [io_rq for io_rq in self._analysis.read_io_requests if - self._filter_io_request(io_rq)], - 'Top read syscall latencies (usec)', - 'duration', is_top=True) - self._output_io_requests_list( - [io_rq for io_rq in self._analysis.write_io_requests if - self._filter_io_request(io_rq)], - 'Top write syscall latencies (usec)', - 'duration', is_top=True) - self._output_io_requests_list( - [io_rq for io_rq in self._analysis.sync_io_requests if - self._filter_io_request(io_rq)], - 'Top sync syscall latencies (usec)', - 'duration', is_top=True) - - def iolatency_syscalls_log_output(self): - self._output_io_requests_list( - self._analysis.io_requests, - 'Log of all I/O system calls', - 'begin_ts') - - # I/O Stats output methods - def _output_latency_stats(self, name, rq_count, min_duration, max_duration, - total_duration, rq_durations): if rq_count < 2: - stdev = '?' 
+ stdev = mi.Unknown() else: - stdev = '%0.03f' % (statistics.stdev(rq_durations) / 1000) + stdev = mi.Duration(statistics.stdev(rq_durations)) if rq_count > 0: - avg = '%0.03f' % (total_duration / (rq_count) / 1000) + avg = total_duration / rq_count else: - avg = "0.000" - min_duration = '%0.03f' % (min_duration / 1000) - max_duration = '%0.03f' % (max_duration / 1000) + avg = 0 - print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format( - name, rq_count, min_duration, avg, max_duration, stdev)) + result_table.append_row( + obj=obj, + count=mi.Integer(rq_count), + min_latency=mi.Duration(min_duration), + avg_latency=mi.Duration(avg), + max_latency=mi.Duration(max_duration), + stdev_latency=stdev, + ) - def _output_latency_stats_from_requests(self, io_requests, name): + def _append_latency_stats_row_from_requests(self, obj, io_requests, + result_table): rq_durations = [io_rq.duration for io_rq in io_requests if self._filter_io_request(io_rq)] - rq_count = len(rq_durations) - if len(rq_durations) > 0: - min_duration = min(rq_durations) - max_duration = max(rq_durations) + self._append_latency_stats_row(obj, rq_durations, result_table) + + def _get_syscall_latency_stats_result_table(self, begin, end): + result_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_SYSCALL_LATENCY_STATS, begin, end) + append_fn = self._append_latency_stats_row_from_requests + append_fn(mi.String('Open'), self._analysis.open_io_requests, + result_table) + append_fn(mi.String('Read'), self._analysis.read_io_requests, + result_table) + append_fn(mi.String('Write'), self._analysis.write_io_requests, + result_table) + append_fn(mi.String('Sync'), self._analysis.sync_io_requests, + result_table) + + return result_table + + def _get_disk_latency_stats_result_table(self, begin, end): + if not self._analysis.disks: + return + + result_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_PART_LATENCY_STATS, begin, end) + + for disk in self._analysis.disks.values(): + if disk.rq_count: 
+ rq_durations = [rq.duration for rq in disk.rq_list if + self._filter_io_request(rq)] + disk = mi.Disk(disk.disk_name) + self._append_latency_stats_row(disk, rq_durations, + result_table) + + return result_table + + def _get_latency_stats_result_tables(self, begin, end): + syscall_tbl = self._get_syscall_latency_stats_result_table(begin, end) + disk_tbl = self._get_disk_latency_stats_result_table(begin, end) + + return syscall_tbl, disk_tbl + + def _print_latency_stats_row(self, row): + if type(row.stdev_latency) is mi.Unknown: + stdev = '?' else: - min_duration = 0 - max_duration = 0 - total_duration = sum(rq_durations) + stdev = '%0.03f' % row.stdev_latency.to_us() + + avg = '%0.03f' % row.avg_latency.to_us() + min_duration = '%0.03f' % row.min_latency.to_us() + max_duration = '%0.03f' % row.max_latency.to_us() - self._output_latency_stats(name, rq_count, min_duration, - max_duration, total_duration, - rq_durations) + print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format( + str(row.obj), row.count.value, min_duration, + avg, max_duration, stdev)) - def _output_syscalls_latency_stats(self): + def _print_syscall_latency_stats(self, stats_table): print('\nSyscalls latency statistics (usec):') print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format( 'Type', 'Count', 'Min', 'Average', 'Max', 'Stdev')) print(IoAnalysisCommand._SECTION_SEPARATOR_STRING) - self._output_latency_stats_from_requests( - self._analysis.open_io_requests, 'Open') - self._output_latency_stats_from_requests( - self._analysis.read_io_requests, 'Read') - self._output_latency_stats_from_requests( - self._analysis.write_io_requests, 'Write') - self._output_latency_stats_from_requests( - self._analysis.sync_io_requests, 'Sync') + for row in stats_table.rows: + self._print_latency_stats_row(row) - def _output_disk_latency_stats(self): - if not self._analysis.disks: + def _print_disk_latency_stats(self, stats_table): + if not stats_table.rows: return print('\nDisk latency statistics (usec):') @@ -685,82 
+1087,99 @@ 'Name', 'Count', 'Min', 'Average', 'Max', 'Stdev')) print(IoAnalysisCommand._SECTION_SEPARATOR_STRING) - for disk in self._analysis.disks.values(): - if disk.rq_count: - rq_durations = [rq.duration for rq in disk.rq_list] - self._output_latency_stats(disk.disk_name, - disk.rq_count, - disk.min_rq_duration, - disk.max_rq_duration, - disk.total_rq_duration, - rq_durations) - - def iostats_output(self): - self._output_syscalls_latency_stats() - self._output_disk_latency_stats() - - def _print_results(self, begin_ns, end_ns): - self._print_date(begin_ns, end_ns) - if self._arg_usage: - self.iotop_output() - if self._arg_stats: - self.iostats_output() - if self._arg_latencytop: - self.iolatency_syscalls_top_output() - if self._arg_freq: - self.iolatency_syscalls_output() - self.iolatency_output() - if self._arg_log: - self.iolatency_syscalls_log_output() + for row in stats_table.rows: + self._print_latency_stats_row(row) - def _reset_total(self, start_ts): - self._analysis.reset() + def _print_latency_stats(self, syscall_latency_stats_table, + disk_latency_stats_table): + self._print_syscall_latency_stats(syscall_latency_stats_table) + self._print_disk_latency_stats(disk_latency_stats_table) def _add_arguments(self, ap): + Command._add_proc_filter_args(ap) + Command._add_min_max_args(ap) + Command._add_log_args( + ap, help='Output the I/O requests in chronological order') + Command._add_top_args( + ap, help='Output the top I/O latencies by category') + Command._add_stats_args(ap, help='Output the I/O latency statistics') + Command._add_freq_args( + ap, help='Output the I/O latency frequency distribution') ap.add_argument('--usage', action='store_true', - help='Show the I/O usage') - ap.add_argument('--latencystats', action='store_true', - help='Show the I/O latency statistics') - ap.add_argument('--latencytop', action='store_true', - help='Show the I/O latency top') - ap.add_argument('--latencyfreq', action='store_true', - help='Show the I/O latency 
frequency distribution') - ap.add_argument('--freq-resolution', type=int, default=20, - help='Frequency distribution resolution ' - '(default 20)') + help='Output the I/O usage') + ap.add_argument('--minsize', type=float, + help='Filter out, I/O operations working with ' + 'less that minsize bytes') + ap.add_argument('--maxsize', type=float, + help='Filter out, I/O operations working with ' + 'more that maxsize bytes') -# entry point -def runstats(): - # create command - iocmd = IoAnalysisCommand() - # execute command - iocmd.run_stats() +def _run(mi_mode): + iocmd = IoAnalysisCommand(mi_mode=mi_mode) + iocmd.run() -def runlatencytop(): - # create command - iocmd = IoAnalysisCommand() - # execute command - iocmd.run_latencytop() +def _runstats(mi_mode): + sys.argv.insert(1, '--stats') + _run(mi_mode) + + +def _runlog(mi_mode): + sys.argv.insert(1, '--log') + _run(mi_mode) + + +def _runfreq(mi_mode): + sys.argv.insert(1, '--freq') + _run(mi_mode) + + +def _runlatencytop(mi_mode): + sys.argv.insert(1, '--top') + _run(mi_mode) + + +def _runusage(mi_mode): + sys.argv.insert(1, '--usage') + _run(mi_mode) + + +def runstats(): + _runstats(mi_mode=False) def runlog(): - # create command - iocmd = IoAnalysisCommand() - # execute command - iocmd.run_log() + _runlog(mi_mode=False) def runfreq(): - # create command - iocmd = IoAnalysisCommand() - # execute command - iocmd.run_freq() + _runfreq(mi_mode=False) + + +def runlatencytop(): + _runlatencytop(mi_mode=False) def runusage(): - # create command - iocmd = IoAnalysisCommand() - # execute command - iocmd.run_usage() + _runusage(mi_mode=False) + + +def runstats_mi(): + _runstats(mi_mode=True) + + +def runlog_mi(): + _runlog(mi_mode=True) + + +def runfreq_mi(): + _runfreq(mi_mode=True) + + +def runlatencytop_mi(): + _runlatencytop(mi_mode=True) + + +def runusage_mi(): + _runusage(mi_mode=True) diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/irq.py lttnganalyses-0.4.3/lttnganalyses/cli/irq.py --- 
lttnganalyses-0.3.0/lttnganalyses/cli/irq.py 2015-07-13 18:00:14.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/irq.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,9 +1,8 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque +# 2015 - Philippe Proulx # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -23,243 +22,525 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +import itertools +import math +import statistics +import sys +from . import mi +from . import termgraph from .command import Command from ..core import irq as core_irq from ..linuxautomaton import common, sv -from ..ascii_graph import Pyasciigraph - -import math -import statistics class IrqAnalysisCommand(Command): _DESC = """The irq command.""" + _ANALYSIS_CLASS = core_irq.IrqAnalysis + _MI_TITLE = 'System interrupt analysis' + _MI_DESCRIPTION = 'Interrupt frequency distribution, statistics, and log' + _MI_TAGS = [mi.Tags.INTERRUPT, mi.Tags.STATS, mi.Tags.FREQ, mi.Tags.LOG] + _MI_TABLE_CLASS_LOG = 'log' + _MI_TABLE_CLASS_HARD_STATS = 'hard-stats' + _MI_TABLE_CLASS_SOFT_STATS = 'soft-stats' + _MI_TABLE_CLASS_FREQ = 'freq' + _MI_TABLE_CLASS_SUMMARY = 'summary' + _MI_TABLE_CLASSES = [ + ( + _MI_TABLE_CLASS_LOG, + 'Interrupt log', [ + ('time_range', 'Time range', mi.TimeRange), + ('raised_ts', 'Raised timestamp', mi.Timestamp), + ('cpu', 'CPU', mi.Cpu), + ('irq', 'Interrupt', mi.Irq), + ] + ), + ( + _MI_TABLE_CLASS_HARD_STATS, + 'Hardware interrupt statistics', [ + ('irq', 'Interrupt', mi.Irq), + ('count', 'Interrupt count', mi.Integer, 'interrupts'), + ('min_duration', 'Minimum duration', mi.Duration), + ('avg_duration', 'Average duration', mi.Duration), + ('max_duration', 'Maximum duration', mi.Duration), + ('stdev_duration', 'Interrupt duration standard deviation', + 
mi.Duration), + ] + ), + ( + _MI_TABLE_CLASS_SOFT_STATS, + 'Hardware interrupt statistics', [ + ('irq', 'Interrupt', mi.Irq), + ('count', 'Interrupt count', mi.Integer, 'interrupts'), + ('min_duration', 'Minimum duration', mi.Duration), + ('avg_duration', 'Average duration', mi.Duration), + ('max_duration', 'Maximum duration', mi.Duration), + ('stdev_duration', 'Interrupt duration standard deviation', + mi.Duration), + ('raise_count', 'Interrupt raise count', mi.Integer, + 'interrupt raises'), + ('min_latency', 'Minimum raise latency', mi.Duration), + ('avg_latency', 'Average raise latency', mi.Duration), + ('max_latency', 'Maximum raise latency', mi.Duration), + ('stdev_latency', 'Interrupt raise latency standard deviation', + mi.Duration), + ] + ), + ( + _MI_TABLE_CLASS_FREQ, + 'Interrupt handler duration frequency distribution', [ + ('duration_lower', 'Duration (lower bound)', mi.Duration), + ('duration_upper', 'Duration (upper bound)', mi.Duration), + ('count', 'Interrupt count', mi.Integer, 'interrupts'), + ] + ), + ( + _MI_TABLE_CLASS_SUMMARY, + 'Interrupt statistics - summary', [ + ('time_range', 'Time range', mi.TimeRange), + ('count', 'Total interrupt count', mi.Integer, 'interrupts'), + ] + ), + ] + + def _analysis_tick(self, begin_ns, end_ns): + log_table = None + hard_stats_table = None + soft_stats_table = None + freq_tables = None + + if self._args.log: + log_table = self._get_log_result_table(begin_ns, end_ns) + + if self._args.stats or self._args.freq: + hard_stats_table, soft_stats_table, freq_tables = \ + self._get_stats_freq_result_tables(begin_ns, end_ns) + + if self._mi_mode: + self._mi_append_result_table(log_table) + self._mi_append_result_table(hard_stats_table) + self._mi_append_result_table(soft_stats_table) - def __init__(self): - super().__init__(self._add_arguments, - enable_max_min_args=True, - enable_freq_arg=True, - enable_log_arg=True, - enable_stats_arg=True) - - def _validate_transform_args(self): - self._arg_irq_filter_list = 
None - self._arg_softirq_filter_list = None - - if self._args.irq: - self._arg_irq_filter_list = self._args.irq.split(',') - if self._args.softirq: - self._arg_softirq_filter_list = self._args.softirq.split(',') - - def _default_args(self, stats, log, freq): - if stats: - self._arg_stats = True - if log: - self._arg_log = True - if freq: - self._arg_freq = True - - def run(self, stats=False, log=False, freq=False): - # parse arguments first - self._parse_args() - # validate, transform and save specific arguments - self._validate_transform_args() - # handle the default args for different executables - self._default_args(stats, log, freq) - # open the trace - self._open_trace() - # create the appropriate analysis/analyses - self._create_analysis() - # run the analysis - self._run_analysis(self._reset_total, self._refresh) - # print results - self._print_results(self.start_ns, self.trace_end_ts) - # close the trace - self._close_trace() - - def run_stats(self): - self.run(stats=True) - - def run_log(self): - self.run(log=True) - - def run_freq(self): - self.run(freq=True) - - def _create_analysis(self): - self._analysis = core_irq.IrqAnalysis(self.state, - self._arg_min, - self._arg_max) - - def _compute_duration_stdev(self, irq_stats_item): - if irq_stats_item.count < 2: - return float('nan') + if self._args.freq_series: + freq_tables = [self._get_freq_series_table(freq_tables)] - durations = [] - for irq in irq_stats_item.irq_list: - durations.append(irq.end_ts - irq.begin_ts) + self._mi_append_result_tables(freq_tables) + else: + self._print_date(begin_ns, end_ns) - return statistics.stdev(durations) + if hard_stats_table or soft_stats_table or freq_tables: + self._print_stats_freq(hard_stats_table, soft_stats_table, + freq_tables) + if log_table: + print() + + if log_table: + self._print_log(log_table) + + def _create_summary_result_tables(self): + if not self._args.stats: + self._mi_clear_result_tables() + return - def _compute_raise_latency_stdev(self, 
irq_stats_item): - if irq_stats_item.raise_count < 2: - return float('nan') + hard_stats_tables = \ + self._mi_get_result_tables(self._MI_TABLE_CLASS_HARD_STATS) + soft_stats_tables = \ + self._mi_get_result_tables(self._MI_TABLE_CLASS_SOFT_STATS) + assert len(hard_stats_tables) == len(soft_stats_tables) + begin = hard_stats_tables[0].timerange.begin + end = hard_stats_tables[-1].timerange.end + summary_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY, + begin, end) + + for hs_table, ss_table in zip(hard_stats_tables, soft_stats_tables): + assert hs_table.timerange == ss_table.timerange + + for row in itertools.chain(hs_table.rows, ss_table.rows): + summary_table.append_row( + time_range=hs_table.timerange, + count=row.count, + ) + + self._mi_clear_result_tables() + self._mi_append_result_table(summary_table) + + def _get_log_result_table(self, begin_ns, end_ns): + result_table = self._mi_create_result_table(self._MI_TABLE_CLASS_LOG, + begin_ns, end_ns) - raise_latencies = [] - for irq in irq_stats_item.irq_list: - if irq.raise_ts is None: + for irq in self._analysis.irq_list: + if not self._filter_irq(irq): continue - raise_latencies.append(irq.begin_ts - irq.raise_ts) + if type(irq) is sv.HardIRQ: + is_hard = True + raised_ts_do = mi.Empty() + name = self._analysis.hard_irq_stats[irq.id].name + else: + is_hard = False - return statistics.stdev(raise_latencies) + if irq.raise_ts is None: + raised_ts_do = mi.Unknown() + else: + raised_ts_do = mi.Timestamp(irq.raise_ts) + + name = self._analysis.softirq_stats[irq.id].name + + result_table.append_row( + time_range=mi.TimeRange(irq.begin_ts, irq.end_ts), + raised_ts=raised_ts_do, + cpu=mi.Cpu(irq.cpu_id), + irq=mi.Irq(is_hard, irq.id, name), + ) + + return result_table + + def _get_common_stats_result_table_row(self, is_hard, irq_nr, irq_stats): + stdev = self._compute_duration_stdev(irq_stats) + + if math.isnan(stdev): + stdev = mi.Unknown() + else: + stdev = mi.Duration(stdev) + + return ( + 
mi.Irq(is_hard, irq_nr, irq_stats.name), + mi.Integer(irq_stats.count), + mi.Duration(irq_stats.min_duration), + mi.Duration(irq_stats.total_duration / irq_stats.count), + mi.Duration(irq_stats.max_duration), + stdev, + ) - def _print_frequency_distribution(self, irq_stats_item, id): + def _append_hard_stats_result_table_row(self, irq_nr, irq_stats, + hard_stats_table): + common_row = self._get_common_stats_result_table_row(True, irq_nr, + irq_stats) + hard_stats_table.append_row( + irq=common_row[0], + count=common_row[1], + min_duration=common_row[2], + avg_duration=common_row[3], + max_duration=common_row[4], + stdev_duration=common_row[5], + ) + + def _append_soft_stats_result_table_row(self, irq_nr, irq_stats, + soft_stats_table): + common_row = self._get_common_stats_result_table_row(False, irq_nr, + irq_stats) + + if irq_stats.raise_count == 0: + min_latency = mi.Unknown() + avg_latency = mi.Unknown() + max_latency = mi.Unknown() + stdev_latency = mi.Unknown() + else: + min_latency = mi.Duration(irq_stats.min_raise_latency) + avg_latency = irq_stats.total_raise_latency / irq_stats.raise_count + avg_latency = mi.Duration(avg_latency) + max_latency = mi.Duration(irq_stats.max_raise_latency) + stdev = self._compute_raise_latency_stdev(irq_stats) + + if math.isnan(stdev): + stdev_latency = mi.Unknown() + else: + stdev_latency = mi.Duration(stdev) + + soft_stats_table.append_row( + irq=common_row[0], + count=common_row[1], + min_duration=common_row[2], + avg_duration=common_row[3], + max_duration=common_row[4], + stdev_duration=common_row[5], + raise_count=mi.Integer(irq_stats.raise_count), + min_latency=min_latency, + avg_latency=avg_latency, + max_latency=max_latency, + stdev_latency=stdev_latency, + ) + + def _fill_freq_result_table(self, irq_stats, freq_table): # The number of bins for the histogram - resolution = self._arg_freq_resolution + resolution = self._args.freq_resolution + if self._args.min is not None: + min_duration = self._args.min + else: + 
min_duration = irq_stats.min_duration + if self._args.max is not None: + max_duration = self._args.max + else: + max_duration = irq_stats.max_duration - min_duration = irq_stats_item.min_duration - max_duration = irq_stats_item.max_duration # ns to µs min_duration /= 1000 max_duration /= 1000 - step = (max_duration - min_duration) / resolution + # histogram's step + if self._args.freq_uniform: + # TODO: perform only one time + durations = [irq.duration for irq in self._analysis.irq_list] + min_duration, max_duration, step = \ + self._get_uniform_freq_values(durations) + else: + step = (max_duration - min_duration) / resolution + if step == 0: return buckets = [] - values = [] - graph = Pyasciigraph() + counts = [] + for i in range(resolution): buckets.append(i * step) - values.append(0) - for irq in irq_stats_item.irq_list: - duration = (irq.end_ts - irq.begin_ts) / 1000 - index = min(int((duration - min_duration) / step), resolution - 1) - values[index] += 1 - - graph_data = [] - for index, value in enumerate(values): - # The graph data format is a tuple (info, value). 
Here info - # is the lower bound of the bucket, value the bucket's count - graph_data.append(('%0.03f' % (index * step + min_duration), - value)) - - graph_lines = graph.graph( - 'Handler duration frequency distribution %s (%s) (usec)' % - (irq_stats_item.name, id), - graph_data, - info_before=True, - count=True - ) + counts.append(0) - for line in graph_lines: - print(line) + for irq in irq_stats.irq_list: + duration = irq.duration / 1000 + index = int((duration - min_duration) / step) + + if index >= resolution: + # special case for max value: put in last bucket (includes + # its upper bound) + if duration == max_duration: + counts[index - 1] += 1 - def _filter_irq(self, irq): - if type(irq) is sv.HardIRQ: - if self._arg_irq_filter_list: - return str(irq.id) in self._arg_irq_filter_list - if self._arg_softirq_filter_list: - return False - else: # SoftIRQ - if self._arg_softirq_filter_list: - return str(irq.id) in self._arg_softirq_filter_list - if self._arg_irq_filter_list: - return False + continue - return True + counts[index] += 1 + + for index, count in enumerate(counts): + lower_bound = index * step + min_duration + upper_bound = (index + 1) * step + min_duration + freq_table.append_row( + duration_lower=mi.Duration.from_us(lower_bound), + duration_upper=mi.Duration.from_us(upper_bound), + count=mi.Integer(count), + ) + + def _fill_stats_freq_result_tables(self, begin_ns, end_ns, is_hard, + analysis_stats, filter_list, + hard_stats_table, soft_stats_table, + freq_tables): + for id in sorted(analysis_stats): + if filter_list and str(id) not in filter_list: + continue + + irq_stats = analysis_stats[id] + + if irq_stats.count == 0: + continue + + if self._args.stats: + if is_hard: + append_row_fn = self._append_hard_stats_result_table_row + table = hard_stats_table + else: + append_row_fn = self._append_soft_stats_result_table_row + table = soft_stats_table + + append_row_fn(id, irq_stats, table) + + if self._args.freq: + subtitle = '{} 
({})'.format(irq_stats.name, id) + freq_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, + begin_ns, end_ns, subtitle) + self._fill_freq_result_table(irq_stats, freq_table) + + # it is possible that the frequency distribution result + # table is empty; we need to keep it any way because + # there's a 1-to-1 association between the statistics + # row indexes (if available) and the frequency table + # indexes + freq_tables.append(freq_table) + + def _get_freq_series_table(self, freq_tables): + if not freq_tables: + return + + column_infos = [ + ('duration_lower', 'Duration (lower bound)', mi.Duration), + ('duration_upper', 'Duration (upper bound)', mi.Duration), + ] + + for index, freq_table in enumerate(freq_tables): + column_infos.append(( + 'irq{}'.format(index), + freq_table.subtitle, + mi.Integer, + 'interrupts' + )) + + title = 'Interrupt handlers duration frequency distributions' + table_class = mi.TableClass(None, title, column_infos) + begin = freq_tables[0].timerange.begin + end = freq_tables[0].timerange.end + result_table = mi.ResultTable(table_class, begin, end) + + for row_index, freq0_row in enumerate(freq_tables[0].rows): + row_tuple = [ + freq0_row.duration_lower, + freq0_row.duration_upper, + ] + + for freq_table in freq_tables: + freq_row = freq_table.rows[row_index] + row_tuple.append(freq_row.count) + + result_table.append_row_tuple(tuple(row_tuple)) + + return result_table + + def _get_stats_freq_result_tables(self, begin_ns, end_ns): + def fill_stats_freq_result_tables(is_hard, stats, filter_list): + self._fill_stats_freq_result_tables(begin_ns, end_ns, is_hard, + stats, filter_list, + hard_stats_table, + soft_stats_table, freq_tables) + + hard_stats_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_HARD_STATS, + begin_ns, end_ns) + soft_stats_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_SOFT_STATS, + begin_ns, end_ns) + freq_tables = [] + + if self._args.irq_filter_list is not None or \ + 
self._args.softirq_filter_list is None: + fill_stats_freq_result_tables(True, self._analysis.hard_irq_stats, + self._args.irq_filter_list) + + if self._args.softirq_filter_list is not None or \ + self._args.irq_filter_list is None: + fill_stats_freq_result_tables(False, self._analysis.softirq_stats, + self._args.softirq_filter_list) + + return hard_stats_table, soft_stats_table, freq_tables + + def _ns_to_hour_nsec(self, ts): + return common.ns_to_hour_nsec(ts, self._args.multi_day, self._args.gmt) - def _print_irq_log(self): + def _print_log(self, result_table): fmt = '[{:<18}, {:<18}] {:>15} {:>4} {:<9} {:>4} {:<22}' title_fmt = '{:<20} {:<19} {:>15} {:>4} {:<9} {:>4} {:<22}' print(title_fmt.format('Begin', 'End', 'Duration (us)', 'CPU', 'Type', '#', 'Name')) - for irq in self._analysis.irq_list: - if not self._filter_irq(irq): - continue + for row in result_table.rows: + timerange = row.time_range + begin_ts = timerange.begin + end_ts = timerange.end + + if type(row.raised_ts) is mi.Timestamp: + raised_fmt = ' (raised at %s)' + raised_ts = \ + raised_fmt % self._ns_to_hour_nsec(row.raised_ts.value) + else: + raised_ts = '' - raise_ts = '' - if type(irq) is sv.HardIRQ: - name = self._analysis.hard_irq_stats[irq.id].name + cpu_id = row.cpu.id + irq_do = row.irq + + if irq_do.is_hard: irqtype = 'IRQ' else: - name = self._analysis.softirq_stats[irq.id].name irqtype = 'SoftIRQ' - if irq.raise_ts is not None: - raise_ts = ' (raised at %s)' % \ - (common.ns_to_hour_nsec(irq.raise_ts, - self._arg_multi_day, - self._arg_gmt)) - - print(fmt.format(common.ns_to_hour_nsec(irq.begin_ts, - self._arg_multi_day, - self._arg_gmt), - common.ns_to_hour_nsec(irq.end_ts, - self._arg_multi_day, - self._arg_gmt), - '%0.03f' % ((irq.end_ts - irq.begin_ts) / 1000), - '%d' % irq.cpu_id, irqtype, irq.id, - name + raise_ts)) - - def _print_irq_stats(self, irq_stats, filter_list, header): - header_printed = False - for id in sorted(irq_stats): - if filter_list and str(id) not in 
filter_list: - continue - irq_stats_item = irq_stats[id] - if irq_stats_item.count == 0: + print(fmt.format(self._ns_to_hour_nsec(begin_ts), + self._ns_to_hour_nsec(end_ts), + '%0.03f' % ((end_ts - begin_ts) / 1000), + '%d' % cpu_id, irqtype, irq_do.nr, + irq_do.name + raised_ts)) + + def _validate_transform_args(self, args): + args.irq_filter_list = None + args.softirq_filter_list = None + + if args.irq: + args.irq_filter_list = args.irq.split(',') + if args.softirq: + args.softirq_filter_list = args.softirq.split(',') + + def _compute_duration_stdev(self, irq_stats_item): + if irq_stats_item.count < 2: + return float('nan') + + durations = [] + for irq in irq_stats_item.irq_list: + durations.append(irq.end_ts - irq.begin_ts) + + return statistics.stdev(durations) + + def _compute_raise_latency_stdev(self, irq_stats_item): + if irq_stats_item.raise_count < 2: + return float('nan') + + raise_latencies = [] + for irq in irq_stats_item.irq_list: + if irq.raise_ts is None: continue - if self._arg_stats: - if self._arg_freq or not header_printed: - print(header) - header_printed = True + raise_latencies.append(irq.begin_ts - irq.raise_ts) - if type(irq_stats_item) is core_irq.HardIrqStats: - self._print_hard_irq_stats_item(irq_stats_item, id) - else: - self._print_soft_irq_stats_item(irq_stats_item, id) + return statistics.stdev(raise_latencies) + + def _print_frequency_distribution(self, freq_table): + title_fmt = 'Handler duration frequency distribution {}' + + graph = termgraph.FreqGraph( + data=freq_table.rows, + get_value=lambda row: row.count.value, + get_lower_bound=lambda row: row.duration_lower.to_us(), + title=title_fmt.format(freq_table.subtitle), + unit='µs' + ) + + graph.print_graph() - if self._arg_freq: - self._print_frequency_distribution(irq_stats_item, id) + def _filter_irq(self, irq): + if type(irq) is sv.HardIRQ: + if self._args.irq_filter_list: + return str(irq.id) in self._args.irq_filter_list + if self._args.softirq_filter_list: + return False + 
else: # SoftIRQ + if self._args.softirq_filter_list: + return str(irq.id) in self._args.softirq_filter_list + if self._args.irq_filter_list: + return False - print() + return True - def _print_hard_irq_stats_item(self, irq_stats_item, id): - output_str = self._get_duration_stats_str(irq_stats_item, id) + def _print_hard_irq_stats_row(self, row): + output_str = self._get_duration_stats_str(row) print(output_str) - def _print_soft_irq_stats_item(self, irq_stats_item, id): - output_str = self._get_duration_stats_str(irq_stats_item, id) - if irq_stats_item.raise_count != 0: - output_str += self._get_raise_latency_str(irq_stats_item, id) + def _print_soft_irq_stats_row(self, row): + output_str = self._get_duration_stats_str(row) + + if row.raise_count.value != 0: + output_str += self._get_raise_latency_str(row) print(output_str) - def _get_duration_stats_str(self, irq_stats_item, id): + def _get_duration_stats_str(self, row): format_str = '{:<3} {:<18} {:>5} {:>12} {:>12} {:>12} {:>12} {:<2}' + irq_do = row.irq + count = row.count.value + min_duration = row.min_duration.to_us() + avg_duration = row.avg_duration.to_us() + max_duration = row.max_duration.to_us() - avg_duration = irq_stats_item.total_duration / irq_stats_item.count - duration_stdev = self._compute_duration_stdev(irq_stats_item) - min_duration = irq_stats_item.min_duration - max_duration = irq_stats_item.max_duration - # ns to µs - avg_duration /= 1000 - duration_stdev /= 1000 - min_duration /= 1000 - max_duration /= 1000 - - if math.isnan(duration_stdev): + if type(row.stdev_duration) is mi.Unknown: duration_stdev_str = '?' 
else: - duration_stdev_str = '%0.03f' % duration_stdev + duration_stdev_str = '%0.03f' % row.stdev_duration.to_us() - output_str = format_str.format('%d:' % id, - '<%s>' % irq_stats_item.name, - '%d' % irq_stats_item.count, + output_str = format_str.format('%d:' % irq_do.nr, + '<%s>' % irq_do.name, + '%d' % count, '%0.03f' % min_duration, '%0.03f' % avg_duration, '%0.03f' % max_duration, @@ -267,102 +548,151 @@ ' |') return output_str - def _get_raise_latency_str(self, irq_stats_item, id): + def _get_raise_latency_str(self, row): format_str = ' {:>6} {:>12} {:>12} {:>12} {:>12}' + raise_count = row.raise_count.value + min_raise_latency = row.min_latency.to_us() + avg_raise_latency = row.avg_latency.to_us() + max_raise_latency = row.max_latency.to_us() - avg_raise_latency = (irq_stats_item.total_raise_latency / - irq_stats_item.raise_count) - raise_latency_stdev = self._compute_raise_latency_stdev(irq_stats_item) - min_raise_latency = irq_stats_item.min_raise_latency - max_raise_latency = irq_stats_item.max_raise_latency - # ns to µs - avg_raise_latency /= 1000 - raise_latency_stdev /= 1000 - min_raise_latency /= 1000 - max_raise_latency /= 1000 - - if math.isnan(raise_latency_stdev): + if type(row.stdev_latency) is mi.Unknown: raise_latency_stdev_str = '?' 
else: - raise_latency_stdev_str = '%0.03f' % raise_latency_stdev + raise_latency_stdev_str = '%0.03f' % row.stdev_latency.to_us() - output_str = format_str.format(irq_stats_item.raise_count, + output_str = format_str.format(raise_count, '%0.03f' % min_raise_latency, '%0.03f' % avg_raise_latency, '%0.03f' % max_raise_latency, '%s' % raise_latency_stdev_str) + return output_str - def _print_results(self, begin_ns, end_ns): - if self._arg_stats or self._arg_freq: - self._print_stats(begin_ns, end_ns) - if self._arg_log: - self._print_irq_log() - - def _print_stats(self, begin_ns, end_ns): - self._print_date(begin_ns, end_ns) - - if self._arg_irq_filter_list is not None or \ - self._arg_softirq_filter_list is None: - header_format = '{:<52} {:<12}\n' \ - '{:<22} {:<14} {:<12} {:<12} {:<10} {:<12}\n' - header = header_format.format( - 'Hard IRQ', 'Duration (us)', - '', 'count', 'min', 'avg', 'max', 'stdev' - ) - header += ('-' * 82 + '|') - self._print_irq_stats(self._analysis.hard_irq_stats, - self._arg_irq_filter_list, - header) - - if self._arg_softirq_filter_list is not None or \ - self._arg_irq_filter_list is None: - header_format = '{:<52} {:<52} {:<12}\n' \ - '{:<22} {:<14} {:<12} {:<12} {:<10} {:<4} ' \ - '{:<3} {:<14} {:<12} {:<12} {:<10} {:<12}\n' - header = header_format.format( - 'Soft IRQ', 'Duration (us)', - 'Raise latency (us)', '', - 'count', 'min', 'avg', 'max', 'stdev', ' |', - 'count', 'min', 'avg', 'max', 'stdev' - ) - header += '-' * 82 + '|' + '-' * 60 - self._print_irq_stats(self._analysis.softirq_stats, - self._arg_softirq_filter_list, - header) - - def _reset_total(self, start_ts): - self._analysis.reset() - - def _refresh(self, begin, end): - self._print_results(begin, end) - self._reset_total(end) + def _print_stats_freq(self, hard_stats_table, soft_stats_table, + freq_tables): + hard_header_format = '{:<52} {:<12}\n' \ + '{:<22} {:<14} {:<12} {:<12} {:<10} {:<12}\n' + hard_header = hard_header_format.format( + 'Hard IRQ', 'Duration (us)', + 
'', 'count', 'min', 'avg', 'max', 'stdev' + ) + hard_header += ('-' * 82 + '|') + soft_header_format = '{:<52} {:<52} {:<12}\n' \ + '{:<22} {:<14} {:<12} {:<12} {:<10} {:<4} ' \ + '{:<3} {:<14} {:<12} {:<12} {:<10} {:<12}\n' + soft_header = soft_header_format.format( + 'Soft IRQ', 'Duration (us)', + 'Raise latency (us)', '', + 'count', 'min', 'avg', 'max', 'stdev', ' |', + 'count', 'min', 'avg', 'max', 'stdev' + ) + soft_header += '-' * 82 + '|' + '-' * 60 + + if hard_stats_table.rows or soft_stats_table.rows: + stats_rows = itertools.chain(hard_stats_table.rows, + soft_stats_table.rows) + + if freq_tables: + for stats_row, freq_table in zip(stats_rows, freq_tables): + irq = stats_row.irq + + if irq.is_hard: + print(hard_header) + self._print_hard_irq_stats_row(stats_row) + else: + print(soft_header) + self._print_soft_irq_stats_row(stats_row) + + # frequency table might be empty: do not print + if freq_table.rows: + print() + self._print_frequency_distribution(freq_table) + + print() + + else: + hard_header_printed = False + soft_header_printed = False + + for stats_row in stats_rows: + irq = stats_row.irq + + if irq.is_hard: + if not hard_header_printed: + print(hard_header) + hard_header_printed = True + + self._print_hard_irq_stats_row(stats_row) + else: + if not soft_header_printed: + if hard_header_printed: + print() + + print(soft_header) + soft_header_printed = True + + self._print_soft_irq_stats_row(stats_row) + + return + + for freq_table in freq_tables: + # frequency table might be empty: do not print + if freq_table.rows: + print() + self._print_frequency_distribution(freq_table) def _add_arguments(self, ap): + Command._add_min_max_args(ap) + Command._add_freq_args( + ap, help='Output the frequency distribution of handler durations') + Command._add_log_args( + ap, help='Output the IRQs in chronological order') + Command._add_stats_args(ap, help='Output IRQ statistics') ap.add_argument('--irq', type=str, default=None, - help='Show results only for the 
list of IRQ') + help='Output results only for the list of IRQ') ap.add_argument('--softirq', type=str, default=None, - help='Show results only for the list of ' - 'SoftIRQ') + help='Output results only for the list of SoftIRQ') + + +def _run(mi_mode): + irqcmd = IrqAnalysisCommand(mi_mode=mi_mode) + irqcmd.run() + + +def _runstats(mi_mode): + sys.argv.insert(1, '--stats') + _run(mi_mode) + + +def _runlog(mi_mode): + sys.argv.insert(1, '--log') + _run(mi_mode) + + +def _runfreq(mi_mode): + sys.argv.insert(1, '--freq') + _run(mi_mode) -# entry point def runstats(): - # create command - irqcmd = IrqAnalysisCommand() - # execute command - irqcmd.run_stats() + _runstats(mi_mode=False) def runlog(): - # create command - irqcmd = IrqAnalysisCommand() - # execute command - irqcmd.run_log() + _runlog(mi_mode=False) def runfreq(): - # create command - irqcmd = IrqAnalysisCommand() - # execute command - irqcmd.run_freq() + _runfreq(mi_mode=False) + + +def runstats_mi(): + _runstats(mi_mode=True) + + +def runlog_mi(): + _runlog(mi_mode=True) + + +def runfreq_mi(): + _runfreq(mi_mode=True) diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/memtop.py lttnganalyses-0.4.3/lttnganalyses/cli/memtop.py --- lttnganalyses-0.3.0/lttnganalyses/cli/memtop.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/memtop.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,9 +1,8 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque +# 2015 - Philippe Proulx # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -23,119 +22,178 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +import operator from .command import Command from ..core import memtop -from ..ascii_graph import Pyasciigraph -import operator +from . import mi +from . 
import termgraph class Memtop(Command): _DESC = """The memtop command.""" - - def __init__(self): - super().__init__(self._add_arguments, enable_proc_filter_args=True) - - def _validate_transform_args(self): - pass - - def run(self): - # parse arguments first - self._parse_args() - # validate, transform and save specific arguments - self._validate_transform_args() - # open the trace - self._open_trace() - # create the appropriate analysis/analyses - self._create_analysis() - # run the analysis - self._run_analysis(self._reset_total, self._refresh) - # print results - self._print_results(self.start_ns, self.trace_end_ts) - # close the trace - self._close_trace() - - def _create_analysis(self): - self._analysis = memtop.Memtop(self.state) - - def _reset_total(self, start_ts): - self._analysis.reset() - - def _refresh(self, begin, end): - self._print_results(begin, end) - self._reset_total(end) - - def _print_results(self, begin_ns, end_ns): - self._print_date(begin_ns, end_ns) - self._print_per_tid_alloc() - self._print_per_tid_freed() - self._print_total_alloc_freed() - - def _print_per_tid_alloc(self): - graph = Pyasciigraph() - values = [] + _ANALYSIS_CLASS = memtop.Memtop + _MI_TITLE = 'Top memory usage' + _MI_DESCRIPTION = 'Per-TID top allocated/freed memory' + _MI_TAGS = [mi.Tags.MEMORY, mi.Tags.TOP] + _MI_TABLE_CLASS_ALLOCD = 'allocd' + _MI_TABLE_CLASS_FREED = 'freed' + _MI_TABLE_CLASS_TOTAL = 'total' + _MI_TABLE_CLASS_SUMMARY = 'summary' + _MI_TABLE_CLASSES = [ + ( + _MI_TABLE_CLASS_ALLOCD, + 'Per-TID top allocated memory', [ + ('process', 'Process', mi.Process), + ('pages', 'Allocated pages', mi.Integer, 'pages'), + ] + ), + ( + _MI_TABLE_CLASS_FREED, + 'Per-TID top freed memory', [ + ('process', 'Process', mi.Process), + ('pages', 'Freed pages', mi.Integer, 'pages'), + ] + ), + ( + _MI_TABLE_CLASS_TOTAL, + 'Total allocated/freed memory', [ + ('allocd', 'Total allocated pages', mi.Integer, 'pages'), + ('freed', 'Total freed pages', mi.Integer, 'pages'), + ] 
+ ), + ( + _MI_TABLE_CLASS_SUMMARY, + 'Memory usage - summary', [ + ('time_range', 'Time range', mi.TimeRange), + ('allocd', 'Total allocated pages', mi.Integer, 'pages'), + ('freed', 'Total freed pages', mi.Integer, 'pages'), + ] + ), + ] + + def _analysis_tick(self, begin_ns, end_ns): + allocd_table = self._get_per_tid_allocd_result_table(begin_ns, end_ns) + freed_table = self._get_per_tid_freed_result_table(begin_ns, end_ns) + total_table = self._get_total_result_table(begin_ns, end_ns) + + if self._mi_mode: + self._mi_append_result_table(allocd_table) + self._mi_append_result_table(freed_table) + self._mi_append_result_table(total_table) + else: + self._print_date(begin_ns, end_ns) + self._print_per_tid_allocd(allocd_table) + self._print_per_tid_freed(freed_table) + self._print_total(total_table) + + def _create_summary_result_tables(self): + total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL) + begin = total_tables[0].timerange.begin + end = total_tables[-1].timerange.end + summary_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY, + begin, end) + + for total_table in total_tables: + total_allocd = total_table.rows[0].allocd + total_freed = total_table.rows[0].freed + summary_table.append_row( + time_range=total_table.timerange, + allocd=total_allocd, + freed=total_freed, + ) + + self._mi_clear_result_tables() + self._mi_append_result_table(summary_table) + + def _get_per_tid_attr_result_table(self, table_class, attr, + begin_ns, end_ns): + result_table = self._mi_create_result_table(table_class, + begin_ns, end_ns) count = 0 for tid in sorted(self._analysis.tids.values(), - key=operator.attrgetter('allocated_pages'), + key=operator.attrgetter(attr), reverse=True): - if not self._filter_process(tid): - continue - - values.append(('%s (%d)' % (tid.comm, tid.tid), - tid.allocated_pages)) - + result_table.append_row( + process=mi.Process(tid.comm, tid=tid.tid), + pages=mi.Integer(getattr(tid, attr)), + ) count += 1 - if 
self._arg_limit > 0 and count >= self._arg_limit: - break - - for line in graph.graph('Per-TID Memory Allocations', values, - unit=' pages'): - print(line) - - def _print_per_tid_freed(self): - graph = Pyasciigraph() - values = [] - count = 0 - for tid in sorted(self._analysis.tids.values(), - key=operator.attrgetter('freed_pages'), - reverse=True): - if not self._filter_process(tid): - continue - - values.append(('%s (%d)' % (tid.comm, tid.tid), tid.freed_pages)) - - count += 1 - if self._arg_limit > 0 and count >= self._arg_limit: + if self._args.limit > 0 and count >= self._args.limit: break - for line in graph.graph('Per-TID Memory Deallocation', values, - unit=' pages'): - print(line) + return result_table - def _print_total_alloc_freed(self): + def _get_per_tid_allocd_result_table(self, begin_ns, end_ns): + return self._get_per_tid_attr_result_table(self._MI_TABLE_CLASS_ALLOCD, + 'allocated_pages', + begin_ns, end_ns) + + def _get_per_tid_freed_result_table(self, begin_ns, end_ns): + return self._get_per_tid_attr_result_table(self._MI_TABLE_CLASS_FREED, + 'freed_pages', + begin_ns, end_ns) + + def _get_total_result_table(self, begin_ns, end_ns): + result_table = self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL, + begin_ns, end_ns) alloc = 0 freed = 0 for tid in self._analysis.tids.values(): - if not self._filter_process(tid): - continue - alloc += tid.allocated_pages freed += tid.freed_pages + result_table.append_row( + allocd=mi.Integer(alloc), + freed=mi.Integer(freed), + ) + + return result_table + + def _print_per_tid_result(self, result_table, title): + graph = termgraph.BarGraph( + title=title, + unit='pages', + get_value=lambda row: row.pages.value, + get_label=lambda row: '%s (%d)' % (row.process.name, + row.process.tid), + label_header='Process', + data=result_table.rows + ) + + graph.print_graph() + + def _print_per_tid_allocd(self, result_table): + self._print_per_tid_result(result_table, 'Per-TID Memory Allocations') + + def 
_print_per_tid_freed(self, result_table): + self._print_per_tid_result(result_table, + 'Per-TID Memory Deallocations') + + def _print_total(self, result_table): + alloc = result_table.rows[0].allocd.value + freed = result_table.rows[0].freed.value print('\nTotal memory usage:\n- %d pages allocated\n- %d pages freed' % (alloc, freed)) def _add_arguments(self, ap): - # specific argument - pass + Command._add_proc_filter_args(ap) + Command._add_top_args(ap) -# entry point +def _run(mi_mode): + memtopcmd = Memtop(mi_mode=mi_mode) + memtopcmd.run() + + +# entry point (human) def run(): - # create command - memtopcmd = Memtop() + _run(mi_mode=False) - # execute command - memtopcmd.run() + +# entry point (MI) +def run_mi(): + _run(mi_mode=True) diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/mi.py lttnganalyses-0.4.3/lttnganalyses/cli/mi.py --- lttnganalyses-0.3.0/lttnganalyses/cli/mi.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/mi.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,525 @@ +# The MIT License (MIT) +# +# Copyright (C) 2015 - Philippe Proulx +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from collections import namedtuple + + +class Tags: + CPU = 'cpu' + MEMORY = 'memory' + INTERRUPT = 'interrupt' + SCHED = 'sched' + SYSCALL = 'syscall' + IO = 'io' + TOP = 'top' + STATS = 'stats' + FREQ = 'freq' + LOG = 'log' + + +class ColumnDescription: + def __init__(self, key, title, do_class, unit=None): + self._key = key + self._title = title + self._do_class = do_class + self._unit = unit + + @property + def key(self): + return self._key + + def to_native_object(self): + obj = { + 'title': self._title, + 'class': self._do_class, + } + + if self._unit: + obj['unit'] = self._unit + + return obj + + +class TableClass: + def __init__(self, name, title, column_descriptions_tuples=None, + inherit=None): + if column_descriptions_tuples is None: + column_descriptions_tuples = [] + + self._inherit = inherit + self._name = name + self._title = title + self._column_descriptions = [] + + for column_descr_tuple in column_descriptions_tuples: + key = column_descr_tuple[0] + title = column_descr_tuple[1] + do_type = column_descr_tuple[2] + unit = None + + if len(column_descr_tuple) > 3: + unit = column_descr_tuple[3] + + column_descr = ColumnDescription(key, title, do_type.CLASS, unit) + self._column_descriptions.append(column_descr) + + @property + def name(self): + return self._name + + @property + def title(self): + return self._title + + def to_native_object(self): + obj = {} + column_descrs = self._column_descriptions + native_column_descrs = [c.to_native_object() for c in column_descrs] + + if self._inherit is not None: + obj['inherit'] = self._inherit + + if self._title is not None: + obj['title'] = self._title + + if native_column_descrs: + obj['column-descriptions'] = native_column_descrs + + 
return obj + + def get_column_named_tuple(self): + keys = [cd.key for cd in self._column_descriptions] + + return namedtuple('Column', keys) + + +class ResultTable: + def __init__(self, table_class, begin, end, subtitle=None): + self._table_class = table_class + self._column_named_tuple = table_class.get_column_named_tuple() + self._subtitle = subtitle + self._timerange = TimeRange(begin, end) + self._rows = [] + + @property + def table_class(self): + return self._table_class + + @property + def timerange(self): + return self._timerange + + @property + def title(self): + return self._table_class.title + + @property + def subtitle(self): + return self._subtitle + + def append_row(self, **kwargs): + row = self._column_named_tuple(**kwargs) + self._rows.append(row) + + def append_row_tuple(self, row_tuple): + self._rows.append(row_tuple) + + @property + def rows(self): + return self._rows + + def to_native_object(self): + obj = { + 'class': self._table_class.name, + 'time-range': self._timerange.to_native_object(), + } + row_objs = [] + + if self._table_class.name: + if self._subtitle is not None: + full_title = '{} [{}]'.format(self.title, self._subtitle) + table_class = TableClass(None, full_title, + inherit=self._table_class.name) + self._table_class = table_class + + if self._table_class.name is None: + obj['class'] = self._table_class.to_native_object() + + for row in self._rows: + row_obj = [] + + for cell in row: + row_obj.append(cell.to_native_object()) + + row_objs.append(row_obj) + + obj['data'] = row_objs + + return obj + + +class _DataObject: + def to_native_object(self): + raise NotImplementedError + + def __eq__(self, other): + # ensure we're comparing the same type first + if not isinstance(other, self.__class__): + return False + + # call specific equality method + return self._eq(other) + + def _eq(self, other): + raise NotImplementedError + + +class _UnstructuredDataObject(_DataObject): + def __init__(self, value): + self._value = value + + @property 
+ def value(self): + return self._value + + def to_native_object(self): + return self._value + + def __str__(self): + return str(self._value) + + def _eq(self, other): + return self._value == other._value + + +class _StructuredDataObject(_DataObject): + def to_native_object(self): + base = {'class': self.CLASS} + base.update(self._to_native_object()) + + return base + + def _to_native_object(self): + raise NotImplementedError + + +class Boolean(_UnstructuredDataObject): + CLASS = 'bool' + + +class Integer(_UnstructuredDataObject): + CLASS = 'int' + + +class Float(_UnstructuredDataObject): + CLASS = 'float' + + +class String(_UnstructuredDataObject): + CLASS = 'string' + + +class Empty(_DataObject): + def to_native_object(self): + return None + + def _eq(self, other): + return True + + +class Unknown(_StructuredDataObject): + CLASS = 'unknown' + + def _to_native_object(self): + return {} + + def _eq(self, other): + return True + + def __str__(self): + return '?' + + +class _SimpleValue(_StructuredDataObject): + def __init__(self, value): + self._value = value + + @property + def value(self): + return self._value + + def _to_native_object(self): + return {'value': self._value} + + def __str__(self): + return str(self._value) + + def _eq(self, other): + return self._value == other._value + + +class _SimpleName(_StructuredDataObject): + def __init__(self, name): + self._name = name + + @property + def name(self): + return self._name + + def _to_native_object(self): + return {'name': self._name} + + def __str__(self): + return self._name + + def _eq(self, other): + return self._name == other._name + + +class Ratio(_SimpleValue): + CLASS = 'ratio' + + @classmethod + def from_percentage(cls, value): + return cls(value / 100) + + def to_percentage(self): + return self._value * 100 + + +class Timestamp(_SimpleValue): + CLASS = 'timestamp' + + +class Duration(_SimpleValue): + CLASS = 'duration' + + @classmethod + def from_ms(cls, ms): + return cls(ms * 1000000) + + 
@classmethod + def from_us(cls, us): + return cls(us * 1000) + + def to_ms(self): + return self._value / 1000000 + + def to_us(self): + return self._value / 1000 + + +class Size(_SimpleValue): + CLASS = 'size' + + +class Bitrate(_SimpleValue): + CLASS = 'bitrate' + + @classmethod + def from_size_duration(cls, size, duration): + return cls(size * 8 / duration) + + +class TimeRange(_StructuredDataObject): + CLASS = 'time-range' + + def __init__(self, begin, end): + self._begin = begin + self._end = end + + @property + def begin(self): + return self._begin + + @property + def end(self): + return self._end + + def _to_native_object(self): + return {'begin': self._begin, 'end': self._end} + + def _eq(self, other): + return (self._begin, self._end) == (other._begin, other._end) + + +class Syscall(_SimpleName): + CLASS = 'syscall' + + +class Process(_StructuredDataObject): + CLASS = 'process' + + def __init__(self, name=None, pid=None, tid=None): + self._name = name + self._pid = pid + self._tid = tid + + @property + def name(self): + return self._name + + @property + def pid(self): + return self._pid + + @property + def tid(self): + return self._tid + + def _to_native_object(self): + ret_dict = {} + + if self._name is not None: + ret_dict['name'] = self._name + + if self._pid is not None: + ret_dict['pid'] = self._pid + + if self._tid is not None: + ret_dict['tid'] = self._tid + + return ret_dict + + def _eq(self, other): + self_tuple = (self._name, self._pid, self._tid) + other_tuple = (other._name, other._pid, other._tid) + + return self_tuple == other_tuple + + +class Path(_StructuredDataObject): + CLASS = 'path' + + def __init__(self, path): + self._path = path + + @property + def path(self): + return self._path + + def _to_native_object(self): + return {'path': self._path} + + def _eq(self, other): + return self._path == other._path + + +class Fd(_StructuredDataObject): + CLASS = 'fd' + + def __init__(self, fd): + self._fd = fd + + @property + def fd(self): + return 
self._fd + + def _to_native_object(self): + return {'fd': self._fd} + + def _eq(self, other): + return self._fd == other._fd + + +class Irq(_StructuredDataObject): + CLASS = 'irq' + + def __init__(self, is_hard, nr, name=None): + self._is_hard = is_hard + self._nr = nr + self._name = name + + @property + def is_hard(self): + return self._is_hard + + @property + def nr(self): + return self._nr + + @property + def name(self): + return self._name + + def _to_native_object(self): + obj = {'hard': self._is_hard, 'nr': self._nr} + + if self._name is not None: + obj['name'] = self._name + + return obj + + def _eq(self, other): + self_tuple = (self._is_hard, self._nr, self._name) + other_tuple = (other._is_hard, other._nr, other._name) + + return self_tuple == other_tuple + + +class Cpu(_StructuredDataObject): + CLASS = 'cpu' + + def __init__(self, cpu_id): + self._id = cpu_id + + @property + def id(self): + return self._id + + def _to_native_object(self): + return {'id': self._id} + + def _eq(self, other): + return self._id == other._id + + +class Disk(_SimpleName): + CLASS = 'disk' + + +class Partition(_SimpleName): + CLASS = 'part' + + +class NetIf(_SimpleName): + CLASS = 'netif' + + +def get_metadata(version, title, description, authors, url, tags, + table_classes): + t_classes = {t.name: t.to_native_object() for t in table_classes} + + return { + 'version': { + 'major': version.major, + 'minor': version.minor, + 'patch': version.patch, + 'extra': version.extra + }, + 'title': title, + 'description': description, + 'authors': authors, + 'url': url, + 'tags': tags, + 'table-classes': t_classes, + } diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/progressbar.py lttnganalyses-0.4.3/lttnganalyses/cli/progressbar.py --- lttnganalyses-0.3.0/lttnganalyses/cli/progressbar.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/progressbar.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # 
Copyright (C) 2015 - Julien Desfossez @@ -35,24 +33,24 @@ BYTES_PER_EVENT = 30 -def getFolderSize(folder): +def get_folder_size(folder): total_size = os.path.getsize(folder) for item in os.listdir(folder): itempath = os.path.join(folder, item) if os.path.isfile(itempath): total_size += os.path.getsize(itempath) elif os.path.isdir(itempath): - total_size += getFolderSize(itempath) + total_size += get_folder_size(itempath) return total_size def progressbar_setup(obj): - if hasattr(obj, '_arg_no_progress') and obj._arg_no_progress: + if obj._args.no_progress: obj.pbar = None return if progressbar_available: - size = getFolderSize(obj._arg_path) + size = get_folder_size(obj._args.path) widgets = ['Processing the trace: ', Percentage(), ' ', Bar(marker='#', left='[', right=']'), ' ', ETA(), ' '] # see docs for other options @@ -62,15 +60,15 @@ else: print('Warning: progressbar module not available, ' 'using --no-progress.', file=sys.stderr) - obj._arg_no_progress = True + obj._args.no_progress = True obj.pbar = None obj.event_count = 0 def progressbar_update(obj): - if hasattr(obj, '_arg_no_progress') and \ - (obj._arg_no_progress or obj.pbar is None): + if obj._args.no_progress or obj.pbar is None: return + try: obj.pbar.update(obj.event_count) except ValueError: @@ -79,6 +77,6 @@ def progressbar_finish(obj): - if hasattr(obj, '_arg_no_progress') and obj._arg_no_progress: + if obj._args.no_progress: return obj.pbar.finish() diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/sched.py lttnganalyses-0.4.3/lttnganalyses/cli/sched.py --- lttnganalyses-0.3.0/lttnganalyses/cli/sched.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/sched.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,896 @@ +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# 2015 - Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal 
+# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import sys +import math +import operator +import statistics +import collections +from . import mi +from . 
import termgraph +from ..core import sched +from .command import Command +from ..common import format_utils +from ..linuxautomaton import common + + +_SchedStats = collections.namedtuple('_SchedStats', [ + 'count', + 'min', + 'max', + 'stdev', + 'total', +]) + + +class SchedAnalysisCommand(Command): + _DESC = """The sched command.""" + _ANALYSIS_CLASS = sched.SchedAnalysis + _MI_TITLE = 'Scheduling latencies analysis' + _MI_DESCRIPTION = \ + 'Scheduling latencies frequency distribution, statistics, top, and log' + _MI_TAGS = [mi.Tags.SCHED, mi.Tags.STATS, mi.Tags.FREQ, mi.Tags.TOP, + mi.Tags.LOG] + _MI_TABLE_CLASS_LOG = 'log' + _MI_TABLE_CLASS_TOP = 'top' + _MI_TABLE_CLASS_TOTAL_STATS = 'total_stats' + _MI_TABLE_CLASS_PER_TID_STATS = 'per_tid_stats' + _MI_TABLE_CLASS_PER_PRIO_STATS = 'per_prio_stats' + _MI_TABLE_CLASS_FREQ = 'freq' + # _MI_TABLE_CLASS_SUMMARY = 'summary' + _MI_TABLE_CLASSES = [ + ( + _MI_TABLE_CLASS_LOG, + 'Scheduling log', [ + ('wakeup_ts', 'Wakeup timestamp', mi.Timestamp), + ('switch_ts', 'Switch timestamp', mi.Timestamp), + ('latency', 'Scheduling latency', mi.Duration), + ('prio', 'Priority', mi.Integer), + ('target_cpu', 'Target CPU', mi.Integer), + ('wakee_proc', 'Wakee process', mi.Process), + ('waker_proc', 'Waker process', mi.Process), + ] + ), + ( + _MI_TABLE_CLASS_TOP, + 'Scheduling top', [ + ('wakeup_ts', 'Wakeup timestamp', mi.Timestamp), + ('switch_ts', 'Switch timestamp', mi.Timestamp), + ('latency', 'Scheduling latency', mi.Duration), + ('prio', 'Priority', mi.Integer), + ('target_cpu', 'Target CPU', mi.Integer), + ('wakee_proc', 'Wakee process', mi.Process), + ('waker_proc', 'Waker process', mi.Process), + ] + ), + ( + _MI_TABLE_CLASS_TOTAL_STATS, + 'Scheduling latency stats (total)', [ + ('count', 'Scheduling count', mi.Integer, 'schedulings'), + ('min_latency', 'Minimum latency', mi.Duration), + ('avg_latency', 'Average latency', mi.Duration), + ('max_latency', 'Maximum latency', mi.Duration), + ('stdev_latency', 'Scheduling 
latency standard deviation', + mi.Duration), + ] + ), + ( + _MI_TABLE_CLASS_PER_TID_STATS, + 'Scheduling latency stats (per-TID)', [ + ('process', 'Wakee process', mi.Process), + ('count', 'Scheduling count', mi.Integer, 'schedulings'), + ('min_latency', 'Minimum latency', mi.Duration), + ('avg_latency', 'Average latency', mi.Duration), + ('max_latency', 'Maximum latency', mi.Duration), + ('stdev_latency', 'Scheduling latency standard deviation', + mi.Duration), + ('prio_list', 'Chronological priorities', mi.String), + ] + ), + ( + _MI_TABLE_CLASS_PER_PRIO_STATS, + 'Scheduling latency stats (per-prio)', [ + ('prio', 'Priority', mi.Integer), + ('count', 'Scheduling count', mi.Integer, 'schedulings'), + ('min_latency', 'Minimum latency', mi.Duration), + ('avg_latency', 'Average latency', mi.Duration), + ('max_latency', 'Maximum latency', mi.Duration), + ('stdev_latency', 'Scheduling latency standard deviation', + mi.Duration), + ] + ), + ( + _MI_TABLE_CLASS_FREQ, + 'Scheduling latency frequency distribution', [ + ('duration_lower', 'Duration (lower bound)', mi.Duration), + ('duration_upper', 'Duration (upper bound)', mi.Duration), + ('count', 'Scheduling count', mi.Integer, 'schedulings'), + ] + ), + ] + + def _analysis_tick(self, begin_ns, end_ns): + log_table = None + top_table = None + total_stats_table = None + per_tid_stats_table = None + per_prio_stats_table = None + total_freq_tables = None + per_tid_freq_tables = None + per_prio_freq_tables = None + + if self._args.log: + log_table = self._get_log_result_table(begin_ns, end_ns) + + if self._args.top: + top_table = self._get_top_result_table(begin_ns, end_ns) + + if self._args.stats: + if self._args.total: + total_stats_table = self._get_total_stats_result_table( + begin_ns, end_ns) + + if self._args.per_tid: + per_tid_stats_table = self._get_per_tid_stats_result_table( + begin_ns, end_ns) + + if self._args.per_prio: + per_prio_stats_table = self._get_per_prio_stats_result_table( + begin_ns, end_ns) + + if 
self._args.freq: + if self._args.total: + total_freq_tables = self._get_total_freq_result_tables( + begin_ns, end_ns) + + if self._args.per_tid: + per_tid_freq_tables = self._get_per_tid_freq_result_tables( + begin_ns, end_ns) + + if self._args.per_prio: + per_prio_freq_tables = self._get_per_prio_freq_result_tables( + begin_ns, end_ns) + + if self._mi_mode: + if log_table: + self._mi_append_result_table(log_table) + + if top_table: + self._mi_append_result_table(top_table) + + if total_stats_table and total_stats_table.rows: + self._mi_append_result_table(total_stats_table) + + if per_tid_stats_table and per_tid_stats_table.rows: + self._mi_append_result_table(per_tid_stats_table) + + if per_prio_stats_table and per_prio_stats_table.rows: + self._mi_append_result_table(per_prio_stats_table) + + if self._args.freq_series: + if total_freq_tables: + self._mi_append_result_tables(total_freq_tables) + + if per_tid_freq_tables: + per_tid_freq_tables = [ + self._get_per_tid_freq_series_table( + per_tid_freq_tables) + ] + + self._mi_append_result_tables(per_tid_freq_tables) + + if per_prio_freq_tables: + per_prio_freq_tables = [ + self._get_per_prio_freq_series_table( + per_prio_freq_tables) + ] + + self._mi_append_result_tables(per_prio_freq_tables) + else: + self._print_date(begin_ns, end_ns) + + if self._args.stats: + if total_stats_table: + self._print_total_stats(total_stats_table) + if per_tid_stats_table: + self._print_per_tid_stats(per_tid_stats_table) + if per_prio_stats_table: + self._print_per_prio_stats(per_prio_stats_table) + + if self._args.freq: + if total_freq_tables: + self._print_freq(total_freq_tables) + if per_tid_freq_tables: + self._print_freq(per_tid_freq_tables) + if per_prio_freq_tables: + self._print_freq(per_prio_freq_tables) + + if log_table: + self._print_sched_events(log_table) + + if top_table: + self._print_sched_events(top_table) + + def _get_total_sched_lists_stats(self): + total_list = self._analysis.sched_list + stdev = 
self._compute_sched_latency_stdev(total_list) + total_stats = _SchedStats( + count=self._analysis.count, + min=self._analysis.min_latency, + max=self._analysis.max_latency, + stdev=stdev, + total=self._analysis.total_latency + ) + + return [total_list], total_stats + + def _get_tid_sched_lists_stats(self): + tid_sched_lists = {} + tid_stats = {} + + for sched_event in self._analysis.sched_list: + tid = sched_event.wakee_proc.tid + if tid not in tid_sched_lists: + tid_sched_lists[tid] = [] + + tid_sched_lists[tid].append(sched_event) + + for tid in tid_sched_lists: + sched_list = tid_sched_lists[tid] + + if not sched_list: + continue + + stdev = self._compute_sched_latency_stdev(sched_list) + latencies = [sched.latency for sched in sched_list] + count = len(latencies) + min_latency = min(latencies) + max_latency = max(latencies) + total_latency = sum(latencies) + + tid_stats[tid] = _SchedStats( + count=count, + min=min_latency, + max=max_latency, + stdev=stdev, + total=total_latency, + ) + + return tid_sched_lists, tid_stats + + def _get_prio_sched_lists_stats(self): + prio_sched_lists = {} + prio_stats = {} + + for sched_event in self._analysis.sched_list: + if sched_event.prio not in prio_sched_lists: + prio_sched_lists[sched_event.prio] = [] + + prio_sched_lists[sched_event.prio].append(sched_event) + + for prio in prio_sched_lists: + sched_list = prio_sched_lists[prio] + + if not sched_list: + continue + + stdev = self._compute_sched_latency_stdev(sched_list) + latencies = [sched.latency for sched in sched_list] + count = len(latencies) + min_latency = min(latencies) + max_latency = max(latencies) + total_latency = sum(latencies) + + prio_stats[prio] = _SchedStats( + count=count, + min=min_latency, + max=max_latency, + stdev=stdev, + total=total_latency, + ) + + return prio_sched_lists, prio_stats + + def _get_log_result_table(self, begin_ns, end_ns): + result_table = self._mi_create_result_table(self._MI_TABLE_CLASS_LOG, + begin_ns, end_ns) + + for sched_event 
in self._analysis.sched_list: + wakee_proc = mi.Process(sched_event.wakee_proc.comm, + sched_event.wakee_proc.pid, + sched_event.wakee_proc.tid) + + if sched_event.waker_proc: + waker_proc = mi.Process(sched_event.waker_proc.comm, + sched_event.waker_proc.pid, + sched_event.waker_proc.tid) + else: + waker_proc = mi.Empty() + + result_table.append_row( + wakeup_ts=mi.Timestamp(sched_event.wakeup_ts), + switch_ts=mi.Timestamp(sched_event.switch_ts), + latency=mi.Duration(sched_event.latency), + prio=mi.Integer(sched_event.prio), + target_cpu=mi.Integer(sched_event.target_cpu), + wakee_proc=wakee_proc, + waker_proc=waker_proc, + ) + + return result_table + + def _get_top_result_table(self, begin_ns, end_ns): + result_table = self._mi_create_result_table( + self._MI_TABLE_CLASS_TOP, begin_ns, end_ns) + + top_events = sorted(self._analysis.sched_list, + key=operator.attrgetter('latency'), + reverse=True) + top_events = top_events[:self._args.limit] + + for sched_event in top_events: + wakee_proc = mi.Process(sched_event.wakee_proc.comm, + sched_event.wakee_proc.pid, + sched_event.wakee_proc.tid) + + if sched_event.waker_proc: + waker_proc = mi.Process(sched_event.waker_proc.comm, + sched_event.waker_proc.pid, + sched_event.waker_proc.tid) + else: + waker_proc = mi.Empty() + + result_table.append_row( + wakeup_ts=mi.Timestamp(sched_event.wakeup_ts), + switch_ts=mi.Timestamp(sched_event.switch_ts), + latency=mi.Duration(sched_event.latency), + prio=mi.Integer(sched_event.prio), + target_cpu=mi.Integer(sched_event.target_cpu), + wakee_proc=wakee_proc, + waker_proc=waker_proc, + ) + + return result_table + + def _get_total_stats_result_table(self, begin_ns, end_ns): + stats_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL_STATS, + begin_ns, end_ns) + + stdev = self._compute_sched_latency_stdev(self._analysis.sched_list) + if math.isnan(stdev): + stdev = mi.Unknown() + else: + stdev = mi.Duration(stdev) + + stats_table.append_row( + 
count=mi.Integer(self._analysis.count), + min_latency=mi.Duration(self._analysis.min_latency), + avg_latency=mi.Duration(self._analysis.total_latency / + self._analysis.count), + max_latency=mi.Duration(self._analysis.max_latency), + stdev_latency=stdev, + ) + + return stats_table + + def _get_per_tid_stats_result_table(self, begin_ns, end_ns): + stats_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_PER_TID_STATS, + begin_ns, end_ns) + + tid_stats_list = sorted(list(self._analysis.tids.values()), + key=lambda proc: proc.comm.lower()) + + for tid_stats in tid_stats_list: + if not tid_stats.sched_list: + continue + + stdev = self._compute_sched_latency_stdev(tid_stats.sched_list) + if math.isnan(stdev): + stdev = mi.Unknown() + else: + stdev = mi.Duration(stdev) + + prio_list = format_utils.format_prio_list(tid_stats.prio_list) + + stats_table.append_row( + process=mi.Process(tid=tid_stats.tid, name=tid_stats.comm), + count=mi.Integer(tid_stats.count), + min_latency=mi.Duration(tid_stats.min_latency), + avg_latency=mi.Duration(tid_stats.total_latency / + tid_stats.count), + max_latency=mi.Duration(tid_stats.max_latency), + stdev_latency=stdev, + prio_list=mi.String(prio_list), + ) + + return stats_table + + def _get_per_prio_stats_result_table(self, begin_ns, end_ns): + stats_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_PER_PRIO_STATS, + begin_ns, end_ns) + + _, prio_stats = self._get_prio_sched_lists_stats() + + for prio in sorted(prio_stats): + stats = prio_stats[prio] + stdev = stats.stdev + + if math.isnan(stdev): + stdev = mi.Unknown() + else: + stdev = mi.Duration(stdev) + + count = stats.count + min_latency = stats.min + max_latency = stats.max + total_latency = stats.total + + stats_table.append_row( + prio=mi.Integer(prio), + count=mi.Integer(count), + min_latency=mi.Duration(min_latency), + avg_latency=mi.Duration(total_latency / count), + max_latency=mi.Duration(max_latency), + stdev_latency=stdev, + ) + + return stats_table 
+ + def _get_per_tid_freq_series_table(self, freq_tables): + if not freq_tables: + return + + column_infos = [ + ('duration_lower', 'Duration (lower bound)', mi.Duration), + ('duration_upper', 'Duration (upper bound)', mi.Duration), + ] + + for index, freq_table in enumerate(freq_tables): + column_infos.append(( + 'tid{}'.format(index), + freq_table.subtitle, + mi.Integer, + 'schedulings' + )) + + title = 'Scheduling latencies frequency distributions' + table_class = mi.TableClass(None, title, column_infos) + begin = freq_tables[0].timerange.begin + end = freq_tables[0].timerange.end + result_table = mi.ResultTable(table_class, begin, end) + + for row_index, freq0_row in enumerate(freq_tables[0].rows): + row_tuple = [ + freq0_row.duration_lower, + freq0_row.duration_upper, + ] + + for freq_table in freq_tables: + freq_row = freq_table.rows[row_index] + row_tuple.append(freq_row.count) + + result_table.append_row_tuple(tuple(row_tuple)) + + return result_table + + def _get_per_prio_freq_series_table(self, freq_tables): + if not freq_tables: + return + + column_infos = [ + ('duration_lower', 'Duration (lower bound)', mi.Duration), + ('duration_upper', 'Duration (upper bound)', mi.Duration), + ] + + for index, freq_table in enumerate(freq_tables): + column_infos.append(( + 'prio{}'.format(index), + freq_table.subtitle, + mi.Integer, + 'schedulings' + )) + + title = 'Scheduling latencies frequency distributions' + table_class = mi.TableClass(None, title, column_infos) + begin = freq_tables[0].timerange.begin + end = freq_tables[0].timerange.end + result_table = mi.ResultTable(table_class, begin, end) + + for row_index, freq0_row in enumerate(freq_tables[0].rows): + row_tuple = [ + freq0_row.duration_lower, + freq0_row.duration_upper, + ] + + for freq_table in freq_tables: + freq_row = freq_table.rows[row_index] + row_tuple.append(freq_row.count) + + result_table.append_row_tuple(tuple(row_tuple)) + + return result_table + + def _fill_freq_result_table(self, sched_list, 
stats, min_duration, + max_duration, step, freq_table): + # The number of bins for the histogram + resolution = self._args.freq_resolution + + if not self._args.freq_uniform: + if self._args.min is not None: + min_duration = self._args.min + else: + min_duration = stats.min + + if self._args.max is not None: + max_duration = self._args.max + else: + max_duration = stats.max + + # ns to µs + min_duration /= 1000 + max_duration /= 1000 + + step = (max_duration - min_duration) / resolution + + if step == 0: + return + + buckets = [] + counts = [] + + for i in range(resolution): + buckets.append(i * step) + counts.append(0) + + for sched_event in sched_list: + duration = sched_event.latency / 1000 + index = int((duration - min_duration) / step) + + if index >= resolution: + # special case for max value: put in last bucket (includes + # its upper bound) + if duration == max_duration: + counts[index - 1] += 1 + + continue + + counts[index] += 1 + + for index, count in enumerate(counts): + lower_bound = index * step + min_duration + upper_bound = (index + 1) * step + min_duration + freq_table.append_row( + duration_lower=mi.Duration.from_us(lower_bound), + duration_upper=mi.Duration.from_us(upper_bound), + count=mi.Integer(count), + ) + + def _get_total_freq_result_tables(self, begin_ns, end_ns): + freq_tables = [] + sched_lists, sched_stats = self._get_total_sched_lists_stats() + min_duration = None + max_duration = None + step = None + + if self._args.freq_uniform: + latencies = [] + + for sched_list in sched_lists: + latencies += [sched.latency for sched in sched_list] + + min_duration, max_duration, step = \ + self._get_uniform_freq_values(latencies) + + for sched_list in sched_lists: + freq_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, + begin_ns, end_ns) + self._fill_freq_result_table(sched_list, sched_stats, min_duration, + max_duration, step, freq_table) + freq_tables.append(freq_table) + + return freq_tables + + def 
_get_per_tid_freq_result_tables(self, begin_ns, end_ns): + freq_tables = [] + tid_sched_lists, tid_stats = self._get_tid_sched_lists_stats() + min_duration = None + max_duration = None + step = None + + if self._args.freq_uniform: + latencies = [] + + for sched_list in tid_sched_lists.values(): + latencies += [sched.latency for sched in sched_list] + + min_duration, max_duration, step = \ + self._get_uniform_freq_values(latencies) + + for tid in sorted(tid_sched_lists): + sched_list = tid_sched_lists[tid] + stats = tid_stats[tid] + subtitle = 'TID: {}'.format(tid) + freq_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, + begin_ns, end_ns, subtitle) + self._fill_freq_result_table(sched_list, stats, min_duration, + max_duration, step, freq_table) + freq_tables.append(freq_table) + + return freq_tables + + def _get_per_prio_freq_result_tables(self, begin_ns, end_ns): + freq_tables = [] + prio_sched_lists, prio_stats = self._get_prio_sched_lists_stats() + min_duration = None + max_duration = None + step = None + + if self._args.freq_uniform: + latencies = [] + + for sched_list in prio_sched_lists.values(): + latencies += [sched.latency for sched in sched_list] + + min_duration, max_duration, step = \ + self._get_uniform_freq_values(latencies) + + for prio in sorted(prio_sched_lists): + sched_list = prio_sched_lists[prio] + stats = prio_stats[prio] + subtitle = 'Priority: {}'.format(prio) + freq_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ, + begin_ns, end_ns, subtitle) + self._fill_freq_result_table(sched_list, stats, min_duration, + max_duration, step, freq_table) + freq_tables.append(freq_table) + + return freq_tables + + def _compute_sched_latency_stdev(self, sched_events): + if len(sched_events) < 2: + return float('nan') + + sched_latencies = [] + for sched_event in sched_events: + sched_latencies.append(sched_event.latency) + + return statistics.stdev(sched_latencies) + + def _ns_to_hour_nsec(self, ts): + return 
common.ns_to_hour_nsec(ts, self._args.multi_day, self._args.gmt) + + def _print_sched_events(self, result_table): + fmt = '[{:<18}, {:<18}] {:>15} {:>10} {:>3} {:<25} {:<25}' + title_fmt = '{:<20} {:<19} {:>15} {:>10} {:>3} {:<25} {:<25}' + print() + print(result_table.title) + print(title_fmt.format('Wakeup', 'Switch', 'Latency (us)', 'Priority', + 'CPU', 'Wakee', 'Waker')) + for row in result_table.rows: + wakeup_ts = row.wakeup_ts.value + switch_ts = row.switch_ts.value + latency = row.latency.value + prio = row.prio.value + target_cpu = row.target_cpu.value + wakee_proc = row.wakee_proc + waker_proc = row.waker_proc + + wakee_str = '%s (%d)' % (wakee_proc.name, wakee_proc.tid) + if isinstance(waker_proc, mi.Empty): + waker_str = 'Unknown (N/A)' + else: + waker_str = '%s (%d)' % (waker_proc.name, waker_proc.tid) + + print(fmt.format(self._ns_to_hour_nsec(wakeup_ts), + self._ns_to_hour_nsec(switch_ts), + '%0.03f' % (latency / 1000), prio, + target_cpu, wakee_str, waker_str)) + + def _print_total_stats(self, stats_table): + row_format = '{:<12} {:<12} {:<12} {:<12} {:<12}' + header = row_format.format( + 'Count', 'Min', 'Avg', 'Max', 'Stdev' + ) + + if stats_table.rows: + print() + print(stats_table.title + ' (us)') + print(header) + for row in stats_table.rows: + if type(row.stdev_latency) is mi.Unknown: + stdev_str = '?' 
+ else: + stdev_str = '%0.03f' % row.stdev_latency.to_us() + + row_str = row_format.format( + '%d' % row.count.value, + '%0.03f' % row.min_latency.to_us(), + '%0.03f' % row.avg_latency.to_us(), + '%0.03f' % row.max_latency.to_us(), + '%s' % stdev_str, + ) + + print(row_str) + + def _print_per_tid_stats(self, stats_table): + row_format = '{:<25} {:>8} {:>12} {:>12} {:>12} {:>12} {}' + header = row_format.format( + 'Process', 'Count', 'Min', 'Avg', 'Max', 'Stdev', 'Priorities' + ) + + if stats_table.rows: + print() + print(stats_table.title + ' (us)') + print(header) + for row in stats_table.rows: + if type(row.stdev_latency) is mi.Unknown: + stdev_str = '?' + else: + stdev_str = '%0.03f' % row.stdev_latency.to_us() + + proc = row.process + proc_str = '%s (%d)' % (proc.name, proc.tid) + + row_str = row_format.format( + '%s' % proc_str, + '%d' % row.count.value, + '%0.03f' % row.min_latency.to_us(), + '%0.03f' % row.avg_latency.to_us(), + '%0.03f' % row.max_latency.to_us(), + '%s' % stdev_str, + '%s' % row.prio_list.value, + ) + + print(row_str) + + def _print_per_prio_stats(self, stats_table): + row_format = '{:>4} {:>8} {:>12} {:>12} {:>12} {:>12}' + header = row_format.format( + 'Prio', 'Count', 'Min', 'Avg', 'Max', 'Stdev' + ) + + if stats_table.rows: + print() + print(stats_table.title + ' (us)') + print(header) + for row in stats_table.rows: + if type(row.stdev_latency) is mi.Unknown: + stdev_str = '?' 
+ else: + stdev_str = '%0.03f' % row.stdev_latency.to_us() + + row_str = row_format.format( + '%d' % row.prio.value, + '%d' % row.count.value, + '%0.03f' % row.min_latency.to_us(), + '%0.03f' % row.avg_latency.to_us(), + '%0.03f' % row.max_latency.to_us(), + '%s' % stdev_str, + ) + + print(row_str) + + def _print_frequency_distribution(self, freq_table): + title_fmt = 'Scheduling latency frequency distribution - {}' + + graph = termgraph.FreqGraph( + data=freq_table.rows, + get_value=lambda row: row.count.value, + get_lower_bound=lambda row: row.duration_lower.to_us(), + title=title_fmt.format(freq_table.subtitle), + unit='µs' + ) + + graph.print_graph() + + def _print_freq(self, freq_tables): + for freq_table in freq_tables: + self._print_frequency_distribution(freq_table) + + def _validate_transform_args(self, args): + # If neither --total nor --per-prio are specified, default + # to --per-tid + if not (args.total or args.per_prio): + args.per_tid = True + + def _add_arguments(self, ap): + Command._add_min_max_args(ap) + Command._add_proc_filter_args(ap) + Command._add_freq_args( + ap, help='Output the frequency distribution of sched switch ' + 'latencies') + Command._add_top_args(ap, help='Output the top sched switch latencies') + Command._add_log_args( + ap, help='Output the sched switches in chronological order') + Command._add_stats_args(ap, help='Output sched switch statistics') + ap.add_argument('--total', action='store_true', + help='Group all results (applies to stats and freq)') + ap.add_argument('--per-tid', action='store_true', + help='Group results per-TID (applies to stats and ' + 'freq) (default)') + ap.add_argument('--per-prio', action='store_true', + help='Group results per-prio (applies to stats and ' + 'freq)') + + +def _run(mi_mode): + schedcmd = SchedAnalysisCommand(mi_mode=mi_mode) + schedcmd.run() + + +def _runstats(mi_mode): + sys.argv.insert(1, '--stats') + _run(mi_mode) + + +def _runlog(mi_mode): + sys.argv.insert(1, '--log') + 
_run(mi_mode) + + +def _runtop(mi_mode): + sys.argv.insert(1, '--top') + _run(mi_mode) + + +def _runfreq(mi_mode): + sys.argv.insert(1, '--freq') + _run(mi_mode) + + +def runstats(): + _runstats(mi_mode=False) + + +def runlog(): + _runlog(mi_mode=False) + + +def runtop(): + _runtop(mi_mode=False) + + +def runfreq(): + _runfreq(mi_mode=False) + + +def runstats_mi(): + _runstats(mi_mode=True) + + +def runlog_mi(): + _runlog(mi_mode=True) + + +def runtop_mi(): + _runtop(mi_mode=True) + + +def runfreq_mi(): + _runfreq(mi_mode=True) diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/syscallstats.py lttnganalyses-0.4.3/lttnganalyses/cli/syscallstats.py --- lttnganalyses-0.3.0/lttnganalyses/cli/syscallstats.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/syscallstats.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,9 +1,8 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez # 2015 - Antoine Busque +# 2015 - Philippe Proulx # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -23,58 +22,116 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -from .command import Command -from ..core import syscalls - +import errno import operator import statistics -import errno +from . 
import mi +from ..core import syscalls +from .command import Command class SyscallsAnalysis(Command): _DESC = """The syscallstats command.""" - - def __init__(self): - super().__init__(self._add_arguments, enable_proc_filter_args=True) - - def _validate_transform_args(self): - pass - - def run(self): - self._parse_args() - self._validate_transform_args() - self._open_trace() - self._create_analysis() - self._run_analysis(self._reset_total, self._refresh) - self._print_results(self.start_ns, self.trace_end_ts) - self._close_trace() - - def _create_analysis(self): - self._analysis = syscalls.SyscallsAnalysis(self.state) - - def _refresh(self, begin, end): - self._print_results(begin, end) - - def _print_results(self, begin_ns, end_ns): - line_format = '{:<38} {:>14} {:>14} {:>14} {:>12} {:>10} {:<14}' - - self._print_date(begin_ns, end_ns) - print('Per-TID syscalls statistics (usec)') + _ANALYSIS_CLASS = syscalls.SyscallsAnalysis + _MI_TITLE = 'System call statistics' + _MI_DESCRIPTION = 'Per-TID and global system call statistics' + _MI_TAGS = [mi.Tags.SYSCALL, mi.Tags.STATS] + _MI_TABLE_CLASS_PER_TID_STATS = 'per-tid' + _MI_TABLE_CLASS_TOTAL = 'total' + _MI_TABLE_CLASS_SUMMARY = 'summary' + _MI_TABLE_CLASSES = [ + ( + _MI_TABLE_CLASS_PER_TID_STATS, + 'System call statistics', [ + ('syscall', 'System call', mi.Syscall), + ('count', 'Call count', mi.Integer, 'calls'), + ('min_duration', 'Minimum call duration', mi.Duration), + ('avg_duration', 'Average call duration', mi.Duration), + ('max_duration', 'Maximum call duration', mi.Duration), + ('stdev_duration', 'Call duration standard deviation', + mi.Duration), + ('return_values', 'Return values count', mi.String), + ] + ), + ( + _MI_TABLE_CLASS_TOTAL, + 'Per-TID system call statistics', [ + ('process', 'Process', mi.Process), + ('count', 'Total system call count', mi.Integer, 'calls'), + ] + ), + ( + _MI_TABLE_CLASS_SUMMARY, + 'System call statistics - summary', [ + ('time_range', 'Time range', mi.TimeRange), + 
('process', 'Process', mi.Process), + ('count', 'Total system call count', mi.Integer, 'calls'), + ] + ), + ] + + def _analysis_tick(self, begin_ns, end_ns): + total_table, per_tid_tables = self._get_result_tables(begin_ns, end_ns) + + if self._mi_mode: + self._mi_append_result_tables(per_tid_tables) + self._mi_append_result_table(total_table) + else: + self._print_date(begin_ns, end_ns) + self._print_results(total_table, per_tid_tables) + + def _post_analysis(self): + if not self._mi_mode: + return + + if len(self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)) > 1: + self._create_summary_result_table() + + self._mi_print() + + def _create_summary_result_table(self): + total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL) + begin = total_tables[0].timerange.begin + end = total_tables[-1].timerange.end + summary_table = \ + self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY, + begin, end) + + for total_table in total_tables: + for row in total_table.rows: + process = row.process + count = row.count + summary_table.append_row( + time_range=total_table.timerange, + process=process, + count=count, + ) + + self._mi_clear_result_tables() + self._mi_append_result_table(summary_table) + + def _get_result_tables(self, begin_ns, end_ns): + per_tid_tables = [] + total_table = self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL, + begin_ns, end_ns) for proc_stats in sorted(self._analysis.tids.values(), key=operator.attrgetter('total_syscalls'), reverse=True): - if not self._filter_process(proc_stats) or \ - proc_stats.total_syscalls == 0: + if proc_stats.total_syscalls == 0: continue pid = proc_stats.pid + if proc_stats.pid is None: pid = '?' 
- print(line_format.format( - '%s (%s, tid = %d)' % (proc_stats.comm, pid, proc_stats.tid), - 'Count', 'Min', 'Average', 'Max', 'Stdev', 'Return values')) + subtitle = '%s (%s, TID: %d)' % (proc_stats.comm, pid, + proc_stats.tid) + result_table = \ + self._mi_create_result_table( + self._MI_TABLE_CLASS_PER_TID_STATS, begin_ns, end_ns, + subtitle) for syscall in sorted(proc_stats.syscalls.values(), key=operator.attrgetter('count'), @@ -98,34 +155,80 @@ return_count[return_key] += 1 - min_duration = round(syscall.min_duration / 1000, 3) - max_duration = round(syscall.max_duration / 1000, 3) - avg_duration = round( - syscall.total_duration / syscall.count / 1000, 3) - if len(durations) > 2: - stdev = round(statistics.stdev(durations) / 1000, 3) + stdev = mi.Duration(statistics.stdev(durations)) else: + stdev = mi.Unknown() + + result_table.append_row( + syscall=mi.Syscall(syscall.name), + count=mi.Integer(syscall.count), + min_duration=mi.Duration(syscall.min_duration), + avg_duration=mi.Duration(syscall.total_duration / + syscall.count), + max_duration=mi.Duration(syscall.max_duration), + stdev_duration=stdev, + return_values=mi.String(str(return_count)), + ) + + per_tid_tables.append(result_table) + total_table.append_row( + process=mi.Process(proc_stats.comm, pid=proc_stats.pid, + tid=proc_stats.tid), + count=mi.Integer(proc_stats.total_syscalls), + ) + + return total_table, per_tid_tables + + def _print_results(self, total_table, per_tid_tables): + line_format = '{:<38} {:>14} {:>14} {:>14} {:>12} {:>10} {:<14}' + + print('Per-TID syscalls statistics (usec)') + total_calls = 0 + + for total_row, table in zip(total_table.rows, per_tid_tables): + print(line_format.format(table.subtitle, + 'Count', 'Min', 'Average', 'Max', + 'Stdev', 'Return values')) + for row in table.rows: + syscall_name = row.syscall.name + syscall_count = row.count.value + min_duration = round(row.min_duration.to_us(), 3) + avg_duration = round(row.avg_duration.to_us(), 3) + max_duration = 
round(row.max_duration.to_us(), 3) + + if type(row.stdev_duration) is mi.Unknown: stdev = '?' + else: + stdev = round(row.stdev_duration.to_us(), 3) - name = syscall.name + proc_total_calls = total_row.count.value print(line_format.format( - ' - ' + name, syscall.count, min_duration, avg_duration, - max_duration, stdev, str(return_count))) + ' - ' + syscall_name, syscall_count, min_duration, + avg_duration, max_duration, stdev, + row.return_values.value)) - print(line_format.format('Total:', proc_stats.total_syscalls, + print(line_format.format('Total:', proc_total_calls, '', '', '', '', '')) print('-' * 113) + total_calls += proc_total_calls - print('\nTotal syscalls: %d' % (self._analysis.total_syscalls)) - - def _reset_total(self, start_ts): - pass + print('\nTotal syscalls: %d' % (total_calls)) def _add_arguments(self, ap): - pass + Command._add_proc_filter_args(ap) -def run(): - syscallscmd = SyscallsAnalysis() +def _run(mi_mode): + syscallscmd = SyscallsAnalysis(mi_mode=mi_mode) syscallscmd.run() + + +# entry point (human) +def run(): + _run(mi_mode=False) + + +# entry point (MI) +def run_mi(): + _run(mi_mode=True) diff -Nru lttnganalyses-0.3.0/lttnganalyses/cli/termgraph.py lttnganalyses-0.4.3/lttnganalyses/cli/termgraph.py --- lttnganalyses-0.3.0/lttnganalyses/cli/termgraph.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/cli/termgraph.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,202 @@ +# The MIT License (MIT) +# +# Copyright (C) 2016 - Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright 
notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from collections import namedtuple + + +GraphDatum = namedtuple('GraphDatum', ['value', 'value_str']) +BarGraphDatum = namedtuple('BarGraphDatum', ['value', 'value_str', 'label']) +FreqGraphDatum = namedtuple( + 'FreqGraphDatum', ['value', 'value_str', 'lower_bound'] +) + +class Graph(): + MAX_GRAPH_WIDTH = 80 + BAR_CHAR = '█' + HR_CHAR = '#' + + def __init__(self, data, get_value, get_value_str, title, unit): + self._data = data + self._get_value = get_value + self._title = title + self._unit = unit + self._max_value = 0 + self._max_value_len = 0 + + if get_value_str is not None: + self._get_value_str_cb = get_value_str + else: + self._get_value_str_cb = Graph._get_value_str_default + + def _transform_data(self, data): + graph_data = [] + + for datum in data: + graph_datum = self._get_graph_datum(datum) + + if graph_datum.value > self._max_value: + self._max_value = graph_datum.value + if len(graph_datum.value_str) > self._max_value_len: + self._max_value_len = len(graph_datum.value_str) + + graph_data.append(graph_datum) + + return graph_data + + def _get_value_str(self, value): + return self._get_value_str_cb(value) + + def _get_graph_datum(self, datum): + value = self._get_value(datum) + value_str = self._get_value_str(value) + + return GraphDatum(value, value_str) + + def _print_header(self): + if self._title: + print(self._title) + + def _print_separator(self): + 
print(self.HR_CHAR * self.MAX_GRAPH_WIDTH) + + def _print_body(self): + raise NotImplementedError() + + def print_graph(self): + if not self._data: + return + + self._print_header() + self._print_separator() + self._print_body() + print() + + @staticmethod + def _get_value_str_default(value): + if isinstance(value, float): + value_str = '{:0.02f}'.format(value) + else: + value_str = str(value) + + return value_str + + +class BarGraph(Graph): + def __init__(self, data, get_value, get_label, get_value_str=None, + title=None, label_header=None, unit=None): + super().__init__(data, get_value, get_value_str, title, unit) + + self._get_label = get_label + self._label_header = label_header + self._data = self._transform_data(self._data) + + def _get_graph_datum(self, datum): + value = self._get_value(datum) + value_str = self._get_value_str(value) + label = self._get_label(datum) + + return BarGraphDatum(value, value_str, label) + + def _get_value_str(self, value): + value_str = super()._get_value_str(value) + if self._unit: + value_str += ' ' + self._unit + + return value_str + + def _get_graph_header(self): + if not self._label_header: + return self._title + + title_len = len(self._title) + space_width = (self.MAX_GRAPH_WIDTH - title_len) + \ + 1 + self._max_value_len + 1 + + return self._title + ' ' * space_width + self._label_header + + def _print_header(self): + header = self._get_graph_header() + print(header) + + def _get_bar_str(self, datum): + if self._max_value == 0: + bar_width = 0 + else: + bar_width = int(self.MAX_GRAPH_WIDTH * datum.value / + self._max_value) + space_width = self.MAX_GRAPH_WIDTH - bar_width + bar_str = self.BAR_CHAR * bar_width + ' ' * space_width + + return bar_str + + def _print_body(self): + for datum in self._data: + bar_str = self._get_bar_str(datum) + value_padding = ' ' * (self._max_value_len - len(datum.value_str)) + print(bar_str, value_padding + datum.value_str, datum.label) + + +class FreqGraph(Graph): + LOWER_BOUND_WIDTH = 8 + + 
def __init__(self, data, get_value, get_lower_bound, + get_value_str=None, title=None, unit=None): + super().__init__(data, get_value, get_value_str, title, unit) + + self._get_lower_bound = get_lower_bound + self._data = self._transform_data(self._data) + + def _get_graph_datum(self, datum): + value = self._get_value(datum) + value_str = self._get_value_str(value) + lower_bound = self._get_lower_bound(datum) + + return FreqGraphDatum(value, value_str, lower_bound) + + def _print_header(self): + header = self._title + if self._unit: + header += ' ({})'.format(self._unit) + + print(header) + + def _get_bar_str(self, datum): + max_width = self.MAX_GRAPH_WIDTH - self.LOWER_BOUND_WIDTH + if self._max_value == 0: + bar_width = 0 + else: + bar_width = int(max_width * datum.value / self._max_value) + space_width = max_width - bar_width + bar_str = self.BAR_CHAR * bar_width + ' ' * space_width + + return bar_str + + def _print_body(self): + for datum in self._data: + bound_str = FreqGraph._get_bound_str(datum) + bar_str = self._get_bar_str(datum) + value_padding = ' ' * (self._max_value_len - len(datum.value_str)) + print(bound_str, bar_str, value_padding + datum.value_str) + + @staticmethod + def _get_bound_str(datum): + return '{:>7.03f}'.format(datum.lower_bound) diff -Nru lttnganalyses-0.3.0/lttnganalyses/common/format_utils.py lttnganalyses-0.4.3/lttnganalyses/common/format_utils.py --- lttnganalyses-0.3.0/lttnganalyses/common/format_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/common/format_utils.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,107 @@ +# The MIT License (MIT) +# +# Copyright (C) 2016 - Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or 
sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import math + + +def format_size(size, binary_prefix=True): + """Convert an integral number of bytes to a human-readable string + + Args: + size (int): a non-negative number of bytes + binary_prefix (bool, optional): whether to use binary units + prefixes, over SI prefixes. 
default: True + + Returns: + The formatted string comprised of the size and units + + Raises: + ValueError: if size < 0 + """ + if size < 0: + raise ValueError('Cannot format negative size') + + if binary_prefix: + base = 1024 + units = [' B', 'KiB', 'MiB', 'GiB','TiB', 'PiB', 'EiB', 'ZiB', 'YiB'] + else: + base = 1000 + units = [' B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] + + if size == 0: + exponent = 0 + else: + exponent = int(math.log(size, base)) + if exponent >= len(units): + # Don't try and use a unit above YiB/YB + exponent = len(units) - 1 + + size /= math.pow(base, exponent) + + unit = units[exponent] + + if exponent == 0: + # Don't display fractions of a byte + format_str = '{:0.0f} {}' + else: + format_str = '{:0.2f} {}' + + return format_str.format(size, unit) + +def format_prio_list(prio_list): + """Format a list of prios into a string of unique prios with count + + Args: + prio_list: a list of PrioEvent objects + + Returns: + The formatted string containing the unique priorities and + their count if they occurred more than once. 
+ """ + prio_count = {} + prio_str = None + + for prio_event in prio_list: + prio = prio_event.prio + if prio not in prio_count: + prio_count[prio] = 0 + + prio_count[prio] += 1 + + for prio in sorted(prio_count.keys()): + count = prio_count[prio] + if count > 1: + count_str = ' ({} times)'.format(count) + else: + count_str = '' + + if prio_str is None: + prio_str = '[{}{}'.format(prio, count_str) + else: + prio_str += ', {}{}'.format(prio, count_str) + + if prio_str is None: + prio_str = '[]' + else: + prio_str += ']' + + return prio_str diff -Nru lttnganalyses-0.3.0/lttnganalyses/common/__init__.py lttnganalyses-0.4.3/lttnganalyses/common/__init__.py --- lttnganalyses-0.3.0/lttnganalyses/common/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/common/__init__.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,21 @@ +# The MIT License (MIT) +# +# Copyright (C) 2015 - Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. diff -Nru lttnganalyses-0.3.0/lttnganalyses/common/version_utils.py lttnganalyses-0.4.3/lttnganalyses/common/version_utils.py --- lttnganalyses-0.3.0/lttnganalyses/common/version_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/common/version_utils.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,59 @@ +# The MIT License (MIT) +# +# Copyright (C) 2015 - Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from functools import total_ordering + + +@total_ordering +class Version: + def __init__(self, major, minor, patch, extra=None): + self.major = major + self.minor = minor + self.patch = patch + self.extra = extra + + def __lt__(self, other): + if self.major < other.major: + return True + if self.major > other.major: + return False + + if self.minor < other.minor: + return True + if self.minor > other.minor: + return False + + return self.patch < other.patch + + def __eq__(self, other): + return ( + self.major == other.major and + self.minor == other.minor and + self.patch == other.patch + ) + + def __repr__(self): + version_str = '{}.{}.{}'.format(self.major, self.minor, self.patch) + if self.extra: + version_str += self.extra + + return version_str diff -Nru lttnganalyses-0.3.0/lttnganalyses/core/analysis.py lttnganalyses-0.4.3/lttnganalyses/core/analysis.py --- lttnganalyses-0.3.0/lttnganalyses/core/analysis.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/core/analysis.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -23,13 +21,79 @@ # SOFTWARE. 
+class AnalysisConfig: + def __init__(self): + self.refresh_period = None + self.period_begin_ev_name = None + self.period_end_ev_name = None + self.period_begin_key_fields = None + self.period_end_key_fields = None + self.period_key_value = None + self.begin_ts = None + self.end_ts = None + self.min_duration = None + self.max_duration = None + self.proc_list = None + self.tid_list = None + self.cpu_list = None + + class Analysis: + TICK_CB = 'tick' + + def __init__(self, state, conf): + self._state = state + self._conf = conf + self._period_key = None + self._period_start_ts = None + self._last_event_ts = None + self._notification_cbs = {} + self._cbs = {} + + self.started = False + self.ended = False + def process_event(self, ev): - raise NotImplementedError() + self._last_event_ts = ev.timestamp + + if not self.started: + if self._conf.begin_ts: + self._check_analysis_begin(ev) + if not self.started: + return + else: + self._period_start_ts = ev.timestamp + self.started = True + + self._check_analysis_end(ev) + if self.ended: + return + + # Prioritise period events over refresh period + if self._conf.period_begin_ev_name is not None: + self._handle_period_event(ev) + elif self._conf.refresh_period is not None: + self._check_refresh(ev) def reset(self): raise NotImplementedError() + def end(self): + if self._period_start_ts: + self._end_period() + + def register_notification_cbs(self, cbs): + for name in cbs: + if name not in self._notification_cbs: + self._notification_cbs[name] = [] + + self._notification_cbs[name].append(cbs[name]) + + def _send_notification_cb(self, name, **kwargs): + if name in self._notification_cbs: + for cb in self._notification_cbs[name]: + cb(**kwargs) + def _register_cbs(self, cbs): self._cbs = cbs @@ -45,3 +109,101 @@ (name.startswith('exit_syscall') or name.startswith('syscall_exit_')): self._cbs['syscall_exit'](ev) + + def _check_analysis_begin(self, ev): + if self._conf.begin_ts and ev.timestamp >= self._conf.begin_ts: + 
self.started = True + self._period_start_ts = ev.timestamp + self.reset() + + def _check_analysis_end(self, ev): + if self._conf.end_ts and ev.timestamp > self._conf.end_ts: + self.ended = True + + def _check_refresh(self, ev): + if not self._period_start_ts: + self._period_start_ts = ev.timestamp + elif ev.timestamp >= (self._period_start_ts + + self._conf.refresh_period): + self._end_period() + self._period_start_ts = ev.timestamp + + def _handle_period_event(self, ev): + if ev.name != self._conf.period_begin_ev_name and \ + ev.name != self._conf.period_end_ev_name: + return + + if self._period_key: + period_key = Analysis._get_period_event_key( + ev, self._conf.period_end_key_fields) + + if not period_key: + # There was an error caused by a missing field, ignore + # this period event + return + + if period_key == self._period_key: + if self._conf.period_end_ev_name: + if ev.name == self._conf.period_end_ev_name: + self._end_period() + self._period_key = None + self._period_start_ts = None + elif ev.name == self._conf.period_begin_ev_name: + self._end_period() + self._begin_period(period_key, ev.timestamp) + elif ev.name == self._conf.period_begin_ev_name: + period_key = Analysis._get_period_event_key( + ev, self._conf.period_begin_key_fields) + + if not period_key: + return + + if self._conf.period_key_value: + # Must convert the period key to string for comparison + str_period_key = tuple(map(str, period_key)) + if self._conf.period_key_value != str_period_key: + return + + self._begin_period(period_key, ev.timestamp) + + def _begin_period(self, period_key, timestamp): + self._period_key = period_key + self._period_start_ts = timestamp + self.reset() + + def _end_period(self): + self._end_period_cb() + self._send_notification_cb(Analysis.TICK_CB, + begin_ns=self._period_start_ts, + end_ns=self._last_event_ts) + + def _end_period_cb(self): + pass + + @staticmethod + def _get_period_event_key(ev, key_fields): + if not key_fields: + return None + + key_values = [] 
+ + for field in key_fields: + try: + key_values.append(ev[field]) + except KeyError: + # Error: missing field + return None + + return tuple(key_values) + + def _filter_process(self, proc): + if not proc: + return True + if self._conf.proc_list and proc.comm not in self._conf.proc_list: + return False + if self._conf.tid_list and proc.tid not in self._conf.tid_list: + return False + return True + + def _filter_cpu(self, cpu): + return not (self._conf.cpu_list and cpu not in self._conf.cpu_list) diff -Nru lttnganalyses-0.3.0/lttnganalyses/core/cputop.py lttnganalyses-0.4.3/lttnganalyses/core/cputop.py --- lttnganalyses-0.3.0/lttnganalyses/core/cputop.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/core/cputop.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -23,34 +21,45 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +from . 
import stats from .analysis import Analysis class Cputop(Analysis): - def __init__(self, state): + def __init__(self, state, conf): notification_cbs = { 'sched_migrate_task': self._process_sched_migrate_task, 'sched_switch_per_cpu': self._process_sched_switch_per_cpu, - 'sched_switch_per_tid': self._process_sched_switch_per_tid + 'sched_switch_per_tid': self._process_sched_switch_per_tid, + 'prio_changed': self._process_prio_changed, } - self._state = state + super().__init__(state, conf) self._state.register_notification_cbs(notification_cbs) + self._ev_count = 0 self.cpus = {} self.tids = {} def process_event(self, ev): + super().process_event(ev) self._ev_count += 1 - def reset(self, timestamp): - for cpu_id in self.cpus: - self.cpus[cpu_id].reset(timestamp) + def reset(self): + for cpu_stats in self.cpus.values(): + cpu_stats.reset() + if cpu_stats.current_task_start_ts is not None: + cpu_stats.current_task_start_ts = self._last_event_ts + + for proc_stats in self.tids.values(): + proc_stats.reset() + if proc_stats.last_sched_ts is not None: + proc_stats.last_sched_ts = self._last_event_ts - for tid in self.tids: - self.tids[tid].reset(timestamp) + def _end_period_cb(self): + self._compute_stats() - def compute_stats(self, start_ts, end_ts): + def _compute_stats(self): """Compute usage stats relative to a certain time range For each CPU and process tracked by the analysis, we set its @@ -61,32 +70,32 @@ process is currently busy, we use the end timestamp to add the partial results of the currently running task to the usage stats. 
- - Args: - start_ts (int): start of time range (nanoseconds from unix - epoch) - end_ts (int): end of time range (nanoseconds from unix epoch) """ - duration = end_ts - start_ts + duration = self._last_event_ts - self._period_start_ts for cpu_id in self.cpus: cpu = self.cpus[cpu_id] if cpu.current_task_start_ts is not None: - cpu.total_usage_time += end_ts - cpu.current_task_start_ts + cpu.total_usage_time += self._last_event_ts - \ + cpu.current_task_start_ts cpu.compute_stats(duration) for tid in self.tids: proc = self.tids[tid] if proc.last_sched_ts is not None: - proc.total_cpu_time += end_ts - proc.last_sched_ts + proc.total_cpu_time += self._last_event_ts - \ + proc.last_sched_ts proc.compute_stats(duration) def _process_sched_switch_per_cpu(self, **kwargs): timestamp = kwargs['timestamp'] cpu_id = kwargs['cpu_id'] - next_tid = kwargs['next_tid'] + wakee_proc = kwargs['wakee_proc'] + + if not self._filter_cpu(cpu_id): + return if cpu_id not in self.cpus: self.cpus[cpu_id] = CpuUsageStats(cpu_id) @@ -95,41 +104,73 @@ if cpu.current_task_start_ts is not None: cpu.total_usage_time += timestamp - cpu.current_task_start_ts - if next_tid == 0: + if not self._filter_process(wakee_proc): cpu.current_task_start_ts = None else: cpu.current_task_start_ts = timestamp def _process_sched_switch_per_tid(self, **kwargs): + cpu_id = kwargs['cpu_id'] + wakee_proc = kwargs['wakee_proc'] timestamp = kwargs['timestamp'] prev_tid = kwargs['prev_tid'] next_tid = kwargs['next_tid'] next_comm = kwargs['next_comm'] + if not self._filter_cpu(cpu_id): + return + if prev_tid in self.tids: prev_proc = self.tids[prev_tid] if prev_proc.last_sched_ts is not None: prev_proc.total_cpu_time += timestamp - prev_proc.last_sched_ts prev_proc.last_sched_ts = None - # Don't account for swapper process - if next_tid == 0: + # Only filter on wakee_proc after finalizing the prev_proc + # accounting + if not self._filter_process(wakee_proc): return if next_tid not in self.tids: - self.tids[next_tid] = 
ProcessCpuStats(next_tid, next_comm) + self.tids[next_tid] = ProcessCpuStats(None, next_tid, next_comm) + self.tids[next_tid].update_prio(timestamp, wakee_proc.prio) next_proc = self.tids[next_tid] next_proc.last_sched_ts = timestamp + def _process_sched_migrate_task(self, **kwargs): + cpu_id = kwargs['cpu_id'] proc = kwargs['proc'] tid = proc.tid + + if not self._filter_process(proc): + return + if not self._filter_cpu(cpu_id): + return + if tid not in self.tids: self.tids[tid] = ProcessCpuStats.new_from_process(proc) self.tids[tid].migrate_count += 1 + def _process_prio_changed(self, **kwargs): + timestamp = kwargs['timestamp'] + prio = kwargs['prio'] + tid = kwargs['tid'] + + if tid not in self.tids: + return + + self.tids[tid].update_prio(timestamp, prio) + + def _filter_process(self, proc): + # Exclude swapper + if proc.tid == 0: + return False + + return super()._filter_process(proc) + @property def event_count(self): return self._ev_count @@ -144,35 +185,34 @@ self.usage_percent = None def compute_stats(self, duration): - self.usage_percent = self.total_usage_time * 100 / duration + if duration != 0: + self.usage_percent = self.total_usage_time * 100 / duration + else: + self.usage_percent = 0 - def reset(self, timestamp): + def reset(self): self.total_usage_time = 0 self.usage_percent = None - if self.current_task_start_ts is not None: - self.current_task_start_ts = timestamp -class ProcessCpuStats(): - def __init__(self, tid, comm): - self.tid = tid - self.comm = comm +class ProcessCpuStats(stats.Process): + def __init__(self, pid, tid, comm): + super().__init__(pid, tid, comm) + # CPU Time and timestamp in nanoseconds (ns) self.total_cpu_time = 0 self.last_sched_ts = None self.migrate_count = 0 self.usage_percent = None - @classmethod - def new_from_process(cls, proc): - return cls(proc.tid, proc.comm) - def compute_stats(self, duration): - self.usage_percent = self.total_cpu_time * 100 / duration + if duration != 0: + self.usage_percent = 
self.total_cpu_time * 100 / duration + else: + self.usage_percent = 0 - def reset(self, timestamp): + def reset(self): + super().reset() self.total_cpu_time = 0 self.migrate_count = 0 self.usage_percent = None - if self.last_sched_ts is not None: - self.last_sched_ts = timestamp diff -Nru lttnganalyses-0.3.0/lttnganalyses/core/__init__.py lttnganalyses-0.4.3/lttnganalyses/core/__init__.py --- lttnganalyses-0.3.0/lttnganalyses/core/__init__.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/core/__init__.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez diff -Nru lttnganalyses-0.3.0/lttnganalyses/core/io.py lttnganalyses-0.4.3/lttnganalyses/core/io.py --- lttnganalyses-0.3.0/lttnganalyses/core/io.py 2015-07-13 18:00:14.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/core/io.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Antoine Busque @@ -22,12 +20,13 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +from . 
import stats from .analysis import Analysis from ..linuxautomaton import sv class IoAnalysis(Analysis): - def __init__(self, state): + def __init__(self, state, conf): notification_cbs = { 'net_dev_xmit': self._process_net_dev_xmit, 'netif_receive_skb': self._process_netif_receive_skb, @@ -35,6 +34,7 @@ 'io_rq_exit': self._process_io_rq_exit, 'create_fd': self._process_create_fd, 'close_fd': self._process_close_fd, + 'update_fd': self._process_update_fd, 'create_parent_proc': self._process_create_parent_proc } @@ -43,7 +43,7 @@ self._process_lttng_statedump_block_device } - self._state = state + super().__init__(state, conf) self._state.register_notification_cbs(notification_cbs) self._register_cbs(event_cbs) @@ -52,6 +52,7 @@ self.tids = {} def process_event(self, ev): + super().process_event(ev) self._process_event_cb(ev) def reset(self): @@ -115,32 +116,20 @@ io_rq.operation): yield io_rq - def get_files_stats(self, pid_filter_list, comm_filter_list): + def get_files_stats(self): files_stats = {} for proc_stats in self.tids.values(): - if pid_filter_list is not None and \ - proc_stats.pid not in pid_filter_list or \ - comm_filter_list is not None and \ - proc_stats.comm not in comm_filter_list: - continue - for fd_list in proc_stats.fds.values(): for fd_stats in fd_list: filename = fd_stats.filename # Add process name to generic filenames to # distinguish them if FileStats.is_generic_name(filename): - filename += '(%s)' % proc_stats.comm + filename += ' (%s)' % proc_stats.comm if filename not in files_stats: - if proc_stats.pid is not None: - pid = proc_stats.pid - else: - pid = proc_stats.tid - - files_stats[filename] = FileStats( - filename, fd_stats.fd, pid) + files_stats[filename] = FileStats(filename) files_stats[filename].update_stats(fd_stats, proc_stats) @@ -164,6 +153,10 @@ def _process_net_dev_xmit(self, **kwargs): name = kwargs['iface_name'] sent_bytes = kwargs['sent_bytes'] + cpu = kwargs['cpu_id'] + + if not self._filter_cpu(cpu): + return if name 
not in self.ifaces: self.ifaces[name] = IfaceStats(name) @@ -174,6 +167,10 @@ def _process_netif_receive_skb(self, **kwargs): name = kwargs['iface_name'] recv_bytes = kwargs['recv_bytes'] + cpu = kwargs['cpu_id'] + + if not self._filter_cpu(cpu): + return if name not in self.ifaces: self.ifaces[name] = IfaceStats(name) @@ -184,16 +181,23 @@ def _process_block_rq_complete(self, **kwargs): req = kwargs['req'] proc = kwargs['proc'] + cpu = kwargs['cpu_id'] + + if not self._filter_process(proc): + return + if not self._filter_cpu(cpu): + return if req.dev not in self.disks: self.disks[req.dev] = DiskStats(req.dev) self.disks[req.dev].update_stats(req) - if proc.tid not in self.tids: - self.tids[proc.tid] = ProcessIOStats.new_from_process(proc) + if proc is not None: + if proc.tid not in self.tids: + self.tids[proc.tid] = ProcessIOStats.new_from_process(proc) - self.tids[proc.tid].update_block_stats(req) + self.tids[proc.tid].update_block_stats(req) def _process_lttng_statedump_block_device(self, event): dev = event['dev'] @@ -208,6 +212,12 @@ proc = kwargs['proc'] parent_proc = kwargs['parent_proc'] io_rq = kwargs['io_rq'] + cpu = kwargs['cpu_id'] + + if not self._filter_process(parent_proc): + return + if not self._filter_cpu(cpu): + return if proc.tid not in self.tids: self.tids[proc.tid] = ProcessIOStats.new_from_process(proc) @@ -231,10 +241,20 @@ proc_stats.update_io_stats(io_rq, fd_types) parent_stats.update_fd_stats(io_rq) + # Check if the proc stats comm corresponds to the actual + # process comm. It might be that it was missing so far. 
+ if proc_stats.comm != proc.comm: + proc_stats.comm = proc.comm + if parent_stats.comm != parent_proc.comm: + parent_stats.comm = parent_proc.comm + def _process_create_parent_proc(self, **kwargs): proc = kwargs['proc'] parent_proc = kwargs['parent_proc'] + if not self._filter_process(parent_proc): + return + if proc.tid not in self.tids: self.tids[proc.tid] = ProcessIOStats.new_from_process(proc) @@ -252,8 +272,14 @@ timestamp = kwargs['timestamp'] parent_proc = kwargs['parent_proc'] tid = parent_proc.tid + cpu = kwargs['cpu_id'] fd = kwargs['fd'] + if not self._filter_process(parent_proc): + return + if not self._filter_cpu(cpu): + return + if tid not in self.tids: self.tids[tid] = ProcessIOStats.new_from_process(parent_proc) @@ -267,12 +293,27 @@ timestamp = kwargs['timestamp'] parent_proc = kwargs['parent_proc'] tid = parent_proc.tid + cpu = kwargs['cpu_id'] fd = kwargs['fd'] + if not self._filter_process(parent_proc): + return + if not self._filter_cpu(cpu): + return + parent_stats = self.tids[tid] last_fd = parent_stats.get_fd(fd) last_fd.close_ts = timestamp + def _process_update_fd(self, **kwargs): + parent_proc = kwargs['parent_proc'] + tid = parent_proc.tid + fd = kwargs['fd'] + + new_filename = parent_proc.fds[fd].filename + fd_list = self.tids[tid].fds[fd] + fd_list[-1].filename = new_filename + class DiskStats(): MINORBITS = 20 @@ -336,22 +377,13 @@ self.sent_packets = 0 -class ProcessIOStats(): +class ProcessIOStats(stats.Process): def __init__(self, pid, tid, comm): - self.pid = pid - self.tid = tid - self.comm = comm - # Number of bytes read or written by the process, by type of I/O - self.disk_read = 0 - self.disk_write = 0 - self.net_read = 0 - self.net_write = 0 - self.unk_read = 0 - self.unk_write = 0 - # Actual number of bytes read or written by the process at the - # block layer - self.block_read = 0 - self.block_write = 0 + super().__init__(pid, tid, comm) + self.disk_io = stats.IO() + self.net_io = stats.IO() + self.unk_io = stats.IO() + 
self.block_io = stats.IO() # FDStats objects, indexed by fd (fileno) self.fds = {} self.rq_list = [] @@ -363,11 +395,11 @@ # Total read/write does not account for block layer I/O @property def total_read(self): - return self.disk_read + self.net_read + self.unk_read + return self.disk_io.read + self.net_io.read + self.unk_io.read @property def total_write(self): - return self.disk_write + self.net_write + self.unk_write + return self.disk_io.write + self.net_io.write + self.unk_io.write def update_fd_stats(self, req): if req.errno is not None: @@ -386,9 +418,9 @@ self.rq_list.append(req) if req.operation is sv.IORequest.OP_READ: - self.block_read += req.size + self.block_io.read += req.size elif req.operation is sv.IORequest.OP_WRITE: - self.block_write += req.size + self.block_io.write += req.size def update_io_stats(self, req, fd_types): self.rq_list.append(req) @@ -404,23 +436,21 @@ self._update_read(req.returned_size, fd_types['fd_in']) self._update_write(req.returned_size, fd_types['fd_out']) - self.rq_list.append(req) - def _update_read(self, size, fd_type): if fd_type == sv.FDType.disk: - self.disk_read += size + self.disk_io.read += size elif fd_type == sv.FDType.net or fd_type == sv.FDType.maybe_net: - self.net_read += size + self.net_io.read += size else: - self.unk_read += size + self.unk_io.read += size def _update_write(self, size, fd_type): if fd_type == sv.FDType.disk: - self.disk_write += size + self.disk_io.write += size elif fd_type == sv.FDType.net or fd_type == sv.FDType.maybe_net: - self.net_write += size + self.net_io.write += size else: - self.unk_write += size + self.unk_io.write += size def _get_current_fd(self, fd): fd_stats = self.fds[fd][-1] @@ -484,14 +514,10 @@ return fd_stats def reset(self): - self.disk_read = 0 - self.disk_write = 0 - self.net_read = 0 - self.net_write = 0 - self.unk_read = 0 - self.unk_write = 0 - self.block_read = 0 - self.block_write = 0 + self.disk_io.reset() + self.net_io.reset() + self.unk_io.reset() + 
self.block_io.reset() self.rq_list = [] for fd in self.fds: @@ -509,10 +535,7 @@ self.family = family self.open_ts = open_ts self.close_ts = None - - # Number of bytes read or written - self.read = 0 - self.write = 0 + self.io = stats.IO() # IO Requests that acted upon the FD self.rq_list = [] @@ -523,38 +546,35 @@ def update_stats(self, req): if req.operation is sv.IORequest.OP_READ: - self.read += req.returned_size + self.io.read += req.returned_size elif req.operation is sv.IORequest.OP_WRITE: - self.write += req.returned_size + self.io.write += req.returned_size elif req.operation is sv.IORequest.OP_READ_WRITE: if self.fd == req.fd_in: - self.read += req.returned_size + self.io.read += req.returned_size elif self.fd == req.fd_out: - self.write += req.returned_size + self.io.write += req.returned_size self.rq_list.append(req) def reset(self): - self.read = 0 - self.write = 0 + self.io.reset() self.rq_list = [] class FileStats(): GENERIC_NAMES = ['pipe', 'socket', 'anon_inode', 'unknown'] - def __init__(self, filename, fd, pid): + def __init__(self, filename): self.filename = filename - # Number of bytes read or written - self.read = 0 - self.write = 0 + self.io = stats.IO() # Dict of file descriptors representing this file, indexed by # parent pid - self.fd_by_pid = {pid: fd} + # FIXME this doesn't cover FD reuse cases + self.fd_by_pid = {} def update_stats(self, fd_stats, proc_stats): - self.read += fd_stats.read - self.write += fd_stats.write + self.io += fd_stats.io if proc_stats.pid is not None: pid = proc_stats.pid @@ -565,8 +585,7 @@ self.fd_by_pid[pid] = fd_stats.fd def reset(self): - self.read = 0 - self.write = 0 + self.io.reset() @staticmethod def is_generic_name(filename): diff -Nru lttnganalyses-0.3.0/lttnganalyses/core/irq.py lttnganalyses-0.4.3/lttnganalyses/core/irq.py --- lttnganalyses-0.3.0/lttnganalyses/core/irq.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/core/irq.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 
+1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Antoine Busque @@ -26,22 +24,15 @@ class IrqAnalysis(Analysis): - def __init__(self, state, min_duration, max_duration): + def __init__(self, state, conf): notification_cbs = { 'irq_handler_entry': self._process_irq_handler_entry, 'irq_handler_exit': self._process_irq_handler_exit, 'softirq_exit': self._process_softirq_exit } - self._state = state + super().__init__(state, conf) self._state.register_notification_cbs(notification_cbs) - self._min_duration = min_duration - self._max_duration = max_duration - # µs to ns - if self._min_duration is not None: - self._min_duration *= 1000 - if self._max_duration is not None: - self._max_duration *= 1000 # Indexed by irq 'id' (irq or vec) self.hard_irq_stats = {} @@ -49,9 +40,6 @@ # Log of individual interrupts self.irq_list = [] - def process_event(self, ev): - pass - def reset(self): self.irq_list = [] for id in self.hard_irq_stats: @@ -64,16 +52,20 @@ name = kwargs['irq_name'] if id not in self.hard_irq_stats: self.hard_irq_stats[id] = HardIrqStats(name) - elif self.hard_irq_stats[id].name != name: - self.hard_irq_stats[id].name = name + elif name not in self.hard_irq_stats[id].names: + self.hard_irq_stats[id].names.append(name) def _process_irq_handler_exit(self, **kwargs): irq = kwargs['hard_irq'] - duration = irq.end_ts - irq.begin_ts - if self._min_duration is not None and duration < self._min_duration: + if not self._filter_cpu(irq.cpu_id): + return + + if self._conf.min_duration is not None and \ + irq.duration < self._conf.min_duration: return - if self._max_duration is not None and duration > self._max_duration: + if self._conf.max_duration is not None and \ + irq.duration > self._conf.max_duration: return self.irq_list.append(irq) @@ -85,10 +77,14 @@ def _process_softirq_exit(self, **kwargs): irq = kwargs['softirq'] - duration = irq.end_ts - irq.begin_ts - if self._min_duration is not None and duration < self._min_duration: + if 
not self._filter_cpu(irq.cpu_id): + return + + if self._conf.min_duration is not None and \ + irq.duration < self._conf.min_duration: return - if self._max_duration is not None and duration > self._max_duration: + if self._conf.max_duration is not None and \ + irq.duration > self._conf.max_duration: return self.irq_list.append(irq) @@ -101,26 +97,28 @@ class IrqStats(): def __init__(self, name): - self.name = name + self._name = name self.min_duration = None self.max_duration = None self.total_duration = 0 self.irq_list = [] @property + def name(self): + return self._name + + @property def count(self): return len(self.irq_list) def update_stats(self, irq): - duration = irq.end_ts - irq.begin_ts - - if self.min_duration is None or duration < self.min_duration: - self.min_duration = duration + if self.min_duration is None or irq.duration < self.min_duration: + self.min_duration = irq.duration - if self.max_duration is None or duration > self.max_duration: - self.max_duration = duration + if self.max_duration is None or irq.duration > self.max_duration: + self.max_duration = irq.duration - self.total_duration += duration + self.total_duration += irq.duration self.irq_list.append(irq) def reset(self): @@ -131,8 +129,15 @@ class HardIrqStats(IrqStats): + NAMES_SEPARATOR = ', ' + def __init__(self, name='unknown'): super().__init__(name) + self.names = [name] + + @property + def name(self): + return self.NAMES_SEPARATOR.join(self.names) class SoftIrqStats(IrqStats): diff -Nru lttnganalyses-0.3.0/lttnganalyses/core/memtop.py lttnganalyses-0.4.3/lttnganalyses/core/memtop.py --- lttnganalyses-0.3.0/lttnganalyses/core/memtop.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/core/memtop.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Antoine Busque @@ -22,29 +20,35 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +from . 
import stats from .analysis import Analysis class Memtop(Analysis): - def __init__(self, state): + def __init__(self, state, conf): notification_cbs = { 'tid_page_alloc': self._process_tid_page_alloc, 'tid_page_free': self._process_tid_page_free } - self._state = state + super().__init__(state, conf) self._state.register_notification_cbs(notification_cbs) - self.tids = {} - def process_event(self, ev): - pass + self.tids = {} def reset(self): for tid in self.tids: self.tids[tid].reset() def _process_tid_page_alloc(self, **kwargs): + cpu_id = kwargs['cpu_id'] proc = kwargs['proc'] + + if not self._filter_process(proc): + return + if not self._filter_cpu(cpu_id): + return + tid = proc.tid if tid not in self.tids: self.tids[tid] = ProcessMemStats.new_from_process(proc) @@ -52,7 +56,14 @@ self.tids[tid].allocated_pages += 1 def _process_tid_page_free(self, **kwargs): + cpu_id = kwargs['cpu_id'] proc = kwargs['proc'] + + if not self._filter_process(proc): + return + if not self._filter_cpu(cpu_id): + return + tid = proc.tid if tid not in self.tids: self.tids[tid] = ProcessMemStats.new_from_process(proc) @@ -60,18 +71,13 @@ self.tids[tid].freed_pages += 1 -class ProcessMemStats(): +class ProcessMemStats(stats.Process): def __init__(self, pid, tid, comm): - self.pid = pid - self.tid = tid - self.comm = comm + super().__init__(pid, tid, comm) + self.allocated_pages = 0 self.freed_pages = 0 - @classmethod - def new_from_process(cls, proc): - return cls(proc.pid, proc.tid, proc.comm) - def reset(self): self.allocated_pages = 0 self.freed_pages = 0 diff -Nru lttnganalyses-0.3.0/lttnganalyses/core/sched.py lttnganalyses-0.4.3/lttnganalyses/core/sched.py --- lttnganalyses-0.3.0/lttnganalyses/core/sched.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/core/sched.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,159 @@ +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# 2015 - Antoine Busque +# +# Permission is hereby granted, free 
of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from . 
import stats +from .analysis import Analysis + + +class SchedAnalysis(Analysis): + def __init__(self, state, conf): + notification_cbs = { + 'sched_switch_per_tid': self._process_sched_switch, + 'prio_changed': self._process_prio_changed, + } + + super().__init__(state, conf) + self._state.register_notification_cbs(notification_cbs) + + # Log of individual wake scheduling events + self.sched_list = [] + # Scheduling latency stats indexed by TID + self.tids = {} + # Stats + self.min_latency = None + self.max_latency = None + self.total_latency = 0 + + @property + def count(self): + return len(self.sched_list) + + def reset(self): + self.sched_list = [] + self.min_latency = None + self.max_latency = None + self.total_latency = 0 + for tid in self.tids: + self.tids[tid].reset() + + def _process_sched_switch(self, **kwargs): + cpu_id = kwargs['cpu_id'] + switch_ts = kwargs['timestamp'] + wakee_proc = kwargs['wakee_proc'] + waker_proc = kwargs['waker_proc'] + next_tid = kwargs['next_tid'] + wakeup_ts = wakee_proc.last_wakeup + + if not self._filter_process(wakee_proc): + return + if not self._filter_cpu(cpu_id): + return + + if wakeup_ts is None: + return + + latency = switch_ts - wakeup_ts + if self._conf.min_duration is not None and \ + latency < self._conf.min_duration: + return + if self._conf.max_duration is not None and \ + latency > self._conf.max_duration: + return + + if waker_proc is not None and waker_proc.tid not in self.tids: + self.tids[waker_proc.tid] = \ + ProcessSchedStats.new_from_process(waker_proc) + self.tids[waker_proc.tid].update_prio(switch_ts, waker_proc.prio) + + if next_tid not in self.tids: + self.tids[next_tid] = \ + ProcessSchedStats.new_from_process(wakee_proc) + self.tids[next_tid].update_prio(switch_ts, wakee_proc.prio) + + sched_event = SchedEvent( + wakeup_ts, switch_ts, wakee_proc, waker_proc, cpu_id) + self.tids[next_tid].update_stats(sched_event) + self._update_stats(sched_event) + + def _process_prio_changed(self, **kwargs): + 
timestamp = kwargs['timestamp'] + prio = kwargs['prio'] + tid = kwargs['tid'] + + if tid not in self.tids: + return + + self.tids[tid].update_prio(timestamp, prio) + + def _update_stats(self, sched_event): + if self.min_latency is None or sched_event.latency < self.min_latency: + self.min_latency = sched_event.latency + + if self.max_latency is None or sched_event.latency > self.max_latency: + self.max_latency = sched_event.latency + + self.total_latency += sched_event.latency + self.sched_list.append(sched_event) + + +class ProcessSchedStats(stats.Process): + def __init__(self, pid, tid, comm): + super().__init__(pid, tid, comm) + + self.min_latency = None + self.max_latency = None + self.total_latency = 0 + self.sched_list = [] + + @property + def count(self): + return len(self.sched_list) + + def update_stats(self, sched_event): + if self.min_latency is None or sched_event.latency < self.min_latency: + self.min_latency = sched_event.latency + + if self.max_latency is None or sched_event.latency > self.max_latency: + self.max_latency = sched_event.latency + + self.total_latency += sched_event.latency + self.sched_list.append(sched_event) + + def reset(self): + super().reset() + self.min_latency = None + self.max_latency = None + self.total_latency = 0 + self.sched_list = [] + + +class SchedEvent(): + def __init__(self, wakeup_ts, switch_ts, wakee_proc, waker_proc, + target_cpu): + self.wakeup_ts = wakeup_ts + self.switch_ts = switch_ts + self.wakee_proc = wakee_proc + self.waker_proc = waker_proc + self.prio = wakee_proc.prio + self.target_cpu = target_cpu + self.latency = switch_ts - wakeup_ts diff -Nru lttnganalyses-0.3.0/lttnganalyses/core/stats.py lttnganalyses-0.4.3/lttnganalyses/core/stats.py --- lttnganalyses-0.3.0/lttnganalyses/core/stats.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/core/stats.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,67 @@ +# The MIT License (MIT) +# +# Copyright (C) 2015 - Antoine Busque +# +# 
Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from collections import namedtuple + + +PrioEvent = namedtuple('PrioEvent', ['timestamp', 'prio']) + + +class Stats(): + def reset(self): + raise NotImplementedError() + + +class Process(Stats): + def __init__(self, pid, tid, comm): + self.pid = pid + self.tid = tid + self.comm = comm + self.prio_list = [] + + @classmethod + def new_from_process(cls, proc): + return cls(proc.pid, proc.tid, proc.comm) + + def update_prio(self, timestamp, prio): + self.prio_list.append(PrioEvent(timestamp, prio)) + + def reset(self): + if self.prio_list: + # Keep the last prio as the first for the next period + self.prio_list = self.prio_list[-1:] + + +class IO(Stats): + def __init__(self): + # Number of bytes read or written + self.read = 0 + self.write = 0 + + def reset(self): + self.read = 0 + self.write = 0 + + def __iadd__(self, other): + self.read += other.read + self.write += other.write + return self diff -Nru lttnganalyses-0.3.0/lttnganalyses/core/syscalls.py lttnganalyses-0.4.3/lttnganalyses/core/syscalls.py --- lttnganalyses-0.3.0/lttnganalyses/core/syscalls.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/core/syscalls.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Antoine Busque @@ -22,32 +20,38 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +from . import stats from .analysis import Analysis class SyscallsAnalysis(Analysis): - def __init__(self, state): + def __init__(self, state, conf): notification_cbs = { 'syscall_exit': self._process_syscall_exit } - self._state = state + super().__init__(state, conf) self._state.register_notification_cbs(notification_cbs) + self.tids = {} self.total_syscalls = 0 - def process_event(self, ev): - pass - def reset(self): + # FIXME why no reset? 
pass def _process_syscall_exit(self, **kwargs): + cpu_id = kwargs['cpu_id'] proc = kwargs['proc'] tid = proc.tid current_syscall = proc.current_syscall name = current_syscall.name + if not self._filter_process(proc): + return + if not self._filter_cpu(cpu_id): + return + if tid not in self.tids: self.tids[tid] = ProcessSyscallStats.new_from_process(proc) @@ -60,18 +64,16 @@ self.total_syscalls += 1 -class ProcessSyscallStats(): +class ProcessSyscallStats(stats.Process): def __init__(self, pid, tid, comm): - self.pid = pid - self.tid = tid - self.comm = comm + super().__init__(pid, tid, comm) + # indexed by syscall name self.syscalls = {} self.total_syscalls = 0 - @classmethod - def new_from_process(cls, proc): - return cls(proc.pid, proc.tid, proc.comm) + def reset(self): + pass class SyscallStats(): diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/automaton.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/automaton.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/automaton.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/automaton.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -41,6 +39,9 @@ self.disks = {} self.mm = MemoryManagement() self._notification_cbs = {} + # State changes can be handled differently depending on + # version of tracer used, so keep track of it. 
+ self._tracer_version = None def register_notification_cbs(self, cbs): for name in cbs: diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/block.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/block.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/block.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/block.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -35,12 +33,8 @@ 'block_bio_backmerge': self._process_block_bio_backmerge, } - self._state = state + super().__init__(state, cbs) self._remap_requests = [] - self._register_cbs(cbs) - - def process_event(self, ev): - self._process_event_cb(ev) def _process_block_bio_remap(self, event): dev = event['dev'] @@ -116,7 +110,10 @@ return req.update_from_rq_complete(event) - proc = self._state.tids[req.tid] + if req.tid in self._state.tids.keys(): + proc = self._state.tids[req.tid] + else: + proc = None self._state.send_notification_cb('block_rq_complete', req=req, - proc=proc) + proc=proc, cpu_id=event['cpu_id']) del disk.pending_requests[sector] diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/common.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/common.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/common.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/common.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -22,7 +20,6 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
-import math import re import time import datetime @@ -30,7 +27,13 @@ import struct NSEC_PER_SEC = 1000000000 -MSEC_PER_NSEC = 1000000 +NSEC_PER_MSEC = 1000000 +NSEC_PER_USEC = 1000 + +BYTES_PER_TIB = 1099511627776 +BYTES_PER_GIB = 1073741824 +BYTES_PER_MIB = 1048576 +BYTES_PER_KIB = 1024 O_CLOEXEC = 0o2000000 @@ -46,32 +49,6 @@ return name[14:] -def convert_size(size, padding_after=False, padding_before=False): - if padding_after and size < 1024: - space_after = ' ' - else: - space_after = '' - if padding_before and size < 1024: - space_before = ' ' - else: - space_before = '' - if size <= 0: - return '0 ' + space_before + 'B' + space_after - size_name = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB') - i = int(math.floor(math.log(size, 1024))) - p = math.pow(1024, i) - s = round(size/p, 2) - if s > 0: - try: - v = '%0.02f' % s - return '%s %s%s%s' % (v, space_before, size_name[i], space_after) - except: - print(i, size_name) - raise Exception('Too big to be true') - else: - return '0 B' - - def is_multi_day_trace_collection(handles): time_begin = None @@ -112,7 +89,7 @@ def extract_timerange(handles, timerange, gmt): pattern = re.compile(r'^\[(?P.*),(?P.*)\]$') if not pattern.match(timerange): - return None + return None, None begin_str = pattern.search(timerange).group('begin').strip() end_str = pattern.search(timerange).group('end').strip() begin = date_to_epoch_nsec(handles, begin_str, gmt) @@ -122,19 +99,22 @@ def date_to_epoch_nsec(handles, date, gmt): # match 2014-12-12 17:29:43.802588035 or 2014-12-12T17:29:43.802588035 - pattern1 = re.compile(r'^(?P\d\d\d\d)-(?P[01]\d)-' - r'(?P[0123]\d)[\sTt]' - r'(?P\d\d):(?P\d\d):(?P\d\d).' - r'(?P\d\d\d\d\d\d\d\d\d)$') + pattern1 = re.compile(r'^(?P\d{4})-(?P[01]\d)-' + r'(?P[0-3]\d)[\sTt]' + r'(?P\d{2}):(?P\d{2}):(?P\d{2})\.' 
+ r'(?P\d{9})$') # match 2014-12-12 17:29:43 or 2014-12-12T17:29:43 - pattern2 = re.compile(r'^(?P\d\d\d\d)-(?P[01]\d)-' - r'(?P[0123]\d)[\sTt]' - r'(?P\d\d):(?P\d\d):(?P\d\d)$') + pattern2 = re.compile(r'^(?P\d{4})-(?P[01]\d)-' + r'(?P[0-3]\d)[\sTt]' + r'(?P\d{2}):(?P\d{2}):(?P\d{2})$') # match 17:29:43.802588035 - pattern3 = re.compile(r'^(?P\d\d):(?P\d\d):(?P\d\d).' - r'(?P\d\d\d\d\d\d\d\d\d)$') + pattern3 = re.compile(r'^(?P\d{2}):(?P\d{2}):(?P\d{2})\.' + r'(?P\d{9})$') # match 17:29:43 - pattern4 = re.compile(r'^(?P\d\d):(?P\d\d):(?P\d\d)$') + pattern4 = re.compile(r'^(?P\d{2}):(?P\d{2}):(?P\d{2})$') + + # match 93847238974923874 + pattern5 = re.compile(r'^\d+$') if pattern1.match(date): year = pattern1.search(date).group('year') @@ -174,6 +154,8 @@ minute = pattern4.search(date).group('min') sec = pattern4.search(date).group('sec') nsec = 0 + elif pattern5.match(date): + return int(date) else: return None @@ -233,29 +215,85 @@ return socket.inet_ntoa(struct.pack('!I', ip)) -def str_to_bytes(value): - num = '' - unit = '' - for i in value: - if i.isdigit() or i == '.': - num = num + i - elif i.isalnum(): - unit = unit + i - num = float(num) - if not unit: - return int(num) - if unit in ['B']: - return int(num) - if unit in ['k', 'K', 'kB', 'KB']: - return int(num * 1024) - if unit in ['m', 'M', 'mB', 'MB']: - return int(num * 1024 * 1024) - if unit in ['g', 'G', 'gB', 'GB']: - return int(num * 1024 * 1024 * 1024) - if unit in ['t', 'T', 'tB', 'TB']: - return int(num * 1024 * 1024 * 1024 * 1024) - print('Unit', unit, 'not understood') - return None +def size_str_to_bytes(size_str): + try: + units_index = next(i for i, c in enumerate(size_str) if c.isalpha()) + except StopIteration: + # no units found + units_index = None + + if units_index is not None: + size = size_str[:units_index] + units = size_str[units_index:] + else: + size = size_str + units = None + + try: + size = float(size) + except ValueError: + raise ValueError('invalid size: {}'.format(size)) + + 
# no units defaults to bytes + if units is not None: + if units in ['t', 'T', 'tB', 'TB']: + size *= BYTES_PER_TIB + elif units in ['g', 'G', 'gB', 'GB']: + size *= BYTES_PER_GIB + elif units in ['m', 'M', 'mB', 'MB']: + size *= BYTES_PER_MIB + elif units in ['k', 'K', 'kB', 'KB']: + size *= BYTES_PER_KIB + elif units == 'B': + # bytes is already the target unit + pass + else: + raise ValueError('unrecognised units: {}'.format(units)) + + size = int(size) + + return size + + +def duration_str_to_ns(duration_str): + try: + units_index = next(i for i, c in enumerate(duration_str) + if c.isalpha()) + except StopIteration: + # no units found + units_index = None + + if units_index is not None: + duration = duration_str[:units_index] + units = duration_str[units_index:].lower() + else: + duration = duration_str + units = None + + try: + duration = float(duration) + except ValueError: + raise ValueError('invalid duration: {}'.format(duration)) + + if units is not None: + if units == 's': + duration *= NSEC_PER_SEC + elif units == 'ms': + duration *= NSEC_PER_MSEC + elif units in ['us', 'µs']: + duration *= NSEC_PER_USEC + elif units == 'ns': + # ns is already the target unit + pass + else: + raise ValueError('unrecognised units: {}'.format(units)) + else: + # no units defaults to seconds + duration *= NSEC_PER_SEC + + duration = int(duration) + + return duration def get_v4_addr_str(ip): diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/__init__.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/__init__.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/__init__.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/__init__.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/io.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/io.py --- 
lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/io.py 2015-07-13 18:00:14.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/io.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -24,8 +22,8 @@ # SOFTWARE. import socket -from . import sp, sv, common from babeltrace import CTFScope +from . import sp, sv, common class IoStateProvider(sp.StateProvider): @@ -39,11 +37,7 @@ 'mm_page_free': self._process_mm_page_free } - self._state = state - self._register_cbs(cbs) - - def process_event(self, ev): - self._process_event_cb(ev) + super().__init__(state, cbs) def _process_syscall_entry(self, event): # Only handle IO Syscalls @@ -192,7 +186,8 @@ oldfd = event['oldfd'] newfd = event['newfd'] if newfd in fds: - self._close_fd(parent_proc, newfd, event.timestamp) + self._close_fd(parent_proc, newfd, event.timestamp, + event['cpu_id']) elif name == 'fcntl': # Only handle if cmd == F_DUPFD (0) if event['cmd'] != 0: @@ -256,6 +251,7 @@ def _track_io_rq_exit(self, event, proc): ret = event['ret'] + cpu_id = event['cpu_id'] io_rq = proc.current_syscall.io_rq # io_rq can be None in the case of fcntl when cmd is not # F_DUPFD, in which case we disregard the syscall as it did @@ -266,18 +262,19 @@ io_rq.update_from_exit(event) if ret >= 0: - self._create_fd(proc, io_rq) + self._create_fd(proc, io_rq, cpu_id) parent_proc = self._get_parent_proc(proc) self._state.send_notification_cb('io_rq_exit', io_rq=io_rq, proc=proc, - parent_proc=parent_proc) + parent_proc=parent_proc, + cpu_id=cpu_id) if isinstance(io_rq, sv.CloseIORequest) and ret == 0: - self._close_fd(proc, io_rq.fd, io_rq.end_ts) + self._close_fd(proc, io_rq.fd, io_rq.end_ts, cpu_id) - def _create_fd(self, proc, io_rq): + def _create_fd(self, proc, io_rq, cpu_id): parent_proc = self._get_parent_proc(proc) if io_rq.fd is not None and io_rq.fd not in parent_proc.fds: @@ -289,14 +286,16 @@ 
self._state.send_notification_cb('create_fd', fd=io_rq.fd, parent_proc=parent_proc, - timestamp=io_rq.end_ts) + timestamp=io_rq.end_ts, + cpu_id=cpu_id) elif isinstance(io_rq, sv.ReadWriteIORequest): if io_rq.fd_in is not None and io_rq.fd_in not in parent_proc.fds: parent_proc.fds[io_rq.fd_in] = sv.FD(io_rq.fd_in) self._state.send_notification_cb('create_fd', fd=io_rq.fd_in, parent_proc=parent_proc, - timestamp=io_rq.end_ts) + timestamp=io_rq.end_ts, + cpu_id=cpu_id) if io_rq.fd_out is not None and \ io_rq.fd_out not in parent_proc.fds: @@ -304,14 +303,16 @@ self._state.send_notification_cb('create_fd', fd=io_rq.fd_out, parent_proc=parent_proc, - timestamp=io_rq.end_ts) + timestamp=io_rq.end_ts, + cpu_id=cpu_id) - def _close_fd(self, proc, fd, timestamp): + def _close_fd(self, proc, fd, timestamp, cpu_id): parent_proc = self._get_parent_proc(proc) self._state.send_notification_cb('close_fd', fd=fd, parent_proc=parent_proc, - timestamp=timestamp) + timestamp=timestamp, + cpu_id=cpu_id) del parent_proc.fds[fd] def _get_parent_proc(self, proc): @@ -338,5 +339,6 @@ proc.pid = event['pid'] if event['pid'] != proc.tid: proc.pid = event['pid'] - parent_proc = sv.Process(proc.pid, proc.pid, proc.comm) + parent_proc = sv.Process(proc.pid, proc.pid, proc.comm, + proc.prio) self._state.tids[parent_proc.pid] = parent_proc diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/irq.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/irq.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/irq.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/irq.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -36,11 +34,7 @@ 'softirq_exit': self._process_softirq_exit } - self._state = state - self._register_cbs(cbs) - - def process_event(self, ev): - self._process_event_cb(ev) + super().__init__(state, cbs) def _get_cpu(self, cpu_id): if cpu_id 
not in self._state.cpus: @@ -106,11 +100,16 @@ def _process_softirq_exit(self, event): cpu = self._get_cpu(event['cpu_id']) vec = event['vec'] - - if not cpu.current_softirqs[vec]: + # List of enqueued softirqs for the current cpu/vec + # combination. None if vec is not found in the dictionary. + current_softirqs = cpu.current_softirqs.get(vec) + + # Ignore the exit if either vec was not in the cpu's dict or + # if its irq list was empty (i.e. no matching raise). + if not current_softirqs: return - cpu.current_softirqs[vec][0].end_ts = event.timestamp + current_softirqs[0].end_ts = event.timestamp self._state.send_notification_cb('softirq_exit', - softirq=cpu.current_softirqs[vec][0]) - del cpu.current_softirqs[vec][0] + softirq=current_softirqs[0]) + del current_softirqs[0] diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/mem.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/mem.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/mem.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/mem.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -30,14 +28,12 @@ def __init__(self, state): cbs = { 'mm_page_alloc': self._process_mm_page_alloc, - 'mm_page_free': self._process_mm_page_free + 'kmem_mm_page_alloc': self._process_mm_page_alloc, + 'mm_page_free': self._process_mm_page_free, + 'kmem_mm_page_free': self._process_mm_page_free, } - self._state = state - self._register_cbs(cbs) - - def process_event(self, ev): - self._process_event_cb(ev) + super().__init__(state, cbs) def _get_current_proc(self, event): cpu_id = event['cpu_id'] @@ -67,7 +63,8 @@ return self._state.send_notification_cb('tid_page_alloc', - proc=current_process) + proc=current_process, + cpu_id=event['cpu_id']) def _process_mm_page_free(self, event): if self._state.mm.page_count == 0: @@ -79,4 +76,6 @@ if current_process is None: return - 
self._state.send_notification_cb('tid_page_free', proc=current_process) + self._state.send_notification_cb('tid_page_free', + proc=current_process, + cpu_id=event['cpu_id']) diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/net.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/net.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/net.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/net.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -33,16 +31,13 @@ 'netif_receive_skb': self._process_netif_receive_skb, } - self._state = state - self._register_cbs(cbs) - - def process_event(self, ev): - self._process_event_cb(ev) + super().__init__(state, cbs) def _process_net_dev_xmit(self, event): self._state.send_notification_cb('net_dev_xmit', iface_name=event['name'], - sent_bytes=event['len']) + sent_bytes=event['len'], + cpu_id=event['cpu_id']) cpu_id = event['cpu_id'] if cpu_id not in self._state.cpus: @@ -70,4 +65,5 @@ def _process_netif_receive_skb(self, event): self._state.send_notification_cb('netif_receive_skb', iface_name=event['name'], - recv_bytes=event['len']) + recv_bytes=event['len'], + cpu_id=event['cpu_id']) diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/sched.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/sched.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/sched.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/sched.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -24,34 +22,27 @@ # SOFTWARE. from . 
import sp, sv +from ..common import version_utils class SchedStateProvider(sp.StateProvider): + # The priority offset for sched_wak* events was fixed in + # lttng-modules 2.7.1 upwards + PRIO_OFFSET_FIX_VERSION = version_utils.Version(2, 7, 1) + def __init__(self, state): cbs = { 'sched_switch': self._process_sched_switch, 'sched_migrate_task': self._process_sched_migrate_task, 'sched_wakeup': self._process_sched_wakeup, 'sched_wakeup_new': self._process_sched_wakeup, + 'sched_waking': self._process_sched_wakeup, 'sched_process_fork': self._process_sched_process_fork, 'sched_process_exec': self._process_sched_process_exec, + 'sched_pi_setprio': self._process_sched_pi_setprio, } - self._state = state - self._register_cbs(cbs) - - def process_event(self, ev): - self._process_event_cb(ev) - - def _fix_process(self, tid, pid, comm): - """Fix a process' pid and comm if it exists, create it otherwise""" - if tid not in self._state.tids: - proc = sv.Process(tid, pid, comm) - self._state.tids[tid] = proc - else: - proc = self._state.tids[tid] - proc.pid = pid - proc.comm = comm + super().__init__(state, cbs) def _sched_switch_per_cpu(self, cpu_id, next_tid): if cpu_id not in self._state.cpus: @@ -64,40 +55,74 @@ else: cpu.current_tid = next_tid - def _sched_switch_per_tid(self, next_tid, next_comm, prev_tid): - if next_tid not in self._state.tids: - if next_tid == 0: + def _create_proc(self, tid): + if tid not in self._state.tids: + if tid == 0: # special case for the swapper - self._state.tids[next_tid] = sv.Process(tid=next_tid, pid=0) + self._state.tids[tid] = sv.Process(tid=tid, pid=0) else: - self._state.tids[next_tid] = sv.Process(tid=next_tid) + self._state.tids[tid] = sv.Process(tid=tid) + + def _sched_switch_per_tid(self, next_tid, next_comm, prev_tid): + # Instantiate processes if new + self._create_proc(prev_tid) + self._create_proc(next_tid) next_proc = self._state.tids[next_tid] next_proc.comm = next_comm next_proc.prev_tid = prev_tid + def 
_check_prio_changed(self, timestamp, tid, prio): + # Ignore swapper + if tid == 0: + return + + proc = self._state.tids[tid] + + if proc.prio != prio: + proc.prio = prio + self._state.send_notification_cb( + 'prio_changed', timestamp=timestamp, tid=tid, prio=prio) + def _process_sched_switch(self, event): timestamp = event.timestamp cpu_id = event['cpu_id'] next_tid = event['next_tid'] next_comm = event['next_comm'] + next_prio = event['next_prio'] prev_tid = event['prev_tid'] + prev_prio = event['prev_prio'] self._sched_switch_per_cpu(cpu_id, next_tid) self._sched_switch_per_tid(next_tid, next_comm, prev_tid) + self._check_prio_changed(timestamp, prev_tid, prev_prio) + self._check_prio_changed(timestamp, next_tid, next_prio) + + wakee_proc = self._state.tids[next_tid] + waker_proc = None + if wakee_proc.last_waker is not None: + waker_proc = self._state.tids[wakee_proc.last_waker] + + cb_data = { + 'timestamp': timestamp, + 'cpu_id': cpu_id, + 'prev_tid': prev_tid, + 'next_tid': next_tid, + 'next_comm': next_comm, + 'wakee_proc': wakee_proc, + 'waker_proc': waker_proc, + } - self._state.send_notification_cb('sched_switch_per_cpu', - timestamp=timestamp, - cpu_id=cpu_id, - next_tid=next_tid) - self._state.send_notification_cb('sched_switch_per_tid', - timestamp=timestamp, - prev_tid=prev_tid, - next_tid=next_tid, - next_comm=next_comm) + self._state.send_notification_cb('sched_switch_per_cpu', **cb_data) + self._state.send_notification_cb('sched_switch_per_tid', **cb_data) + + wakee_proc.last_wakeup = None + wakee_proc.last_waker = None def _process_sched_migrate_task(self, event): tid = event['tid'] + prio = event['prio'] + if tid not in self._state.tids: proc = sv.Process() proc.tid = tid @@ -106,20 +131,46 @@ else: proc = self._state.tids[tid] - self._state.send_notification_cb('sched_migrate_task', proc=proc) + self._state.send_notification_cb( + 'sched_migrate_task', proc=proc, cpu_id=event['cpu_id']) + self._check_prio_changed(event.timestamp, tid, prio) def 
_process_sched_wakeup(self, event): target_cpu = event['target_cpu'] + current_cpu = event['cpu_id'] + prio = event['prio'] tid = event['tid'] + if self._state.tracer_version < self.PRIO_OFFSET_FIX_VERSION: + prio -= 100 + if target_cpu not in self._state.cpus: self._state.cpus[target_cpu] = sv.CPU(target_cpu) + if current_cpu not in self._state.cpus: + self._state.cpus[current_cpu] = sv.CPU(current_cpu) + + # If the TID is already executing on a CPU, ignore this wakeup + for cpu_id in self._state.cpus: + cpu = self._state.cpus[cpu_id] + if cpu.current_tid == tid: + return + if tid not in self._state.tids: proc = sv.Process() proc.tid = tid self._state.tids[tid] = proc + self._check_prio_changed(event.timestamp, tid, prio) + + # A process can be woken up multiple times, only record + # the first one + if self._state.tids[tid].last_wakeup is None: + self._state.tids[tid].last_wakeup = event.timestamp + if self._state.cpus[current_cpu].current_tid is not None: + self._state.tids[tid].last_waker = \ + self._state.cpus[current_cpu].current_tid + def _process_sched_process_fork(self, event): child_tid = event['child_tid'] child_pid = event['child_pid'] @@ -128,10 +179,15 @@ parent_tid = event['parent_pid'] parent_comm = event['parent_comm'] - child_proc = sv.Process(child_tid, child_pid, child_comm) + if parent_tid not in self._state.tids: + self._state.tids[parent_tid] = sv.Process( + parent_tid, parent_pid, parent_comm) + else: + self._state.tids[parent_tid].pid = parent_pid + self._state.tids[parent_tid].comm = parent_comm - self._fix_process(parent_tid, parent_pid, parent_comm) parent_proc = self._state.tids[parent_pid] + child_proc = sv.Process(child_tid, child_pid, child_comm) for fd in parent_proc.fds: old_fd = parent_proc.fds[fd] @@ -139,10 +195,9 @@ # Note: the parent_proc key in the notification function # refers to the parent of the FD, which in this case is # the child_proc created by the fork - self._state.send_notification_cb('create_fd', - fd=fd, - 
parent_proc=child_proc, - timestamp=event.timestamp) + self._state.send_notification_cb( + 'create_fd', fd=fd, parent_proc=child_proc, + timestamp=event.timestamp, cpu_id=event['cpu_id']) self._state.tids[child_tid] = child_proc @@ -165,8 +220,14 @@ if proc.fds[fd].cloexec: toremove.append(fd) for fd in toremove: - self._state.send_notification_cb('close_fd', - fd=fd, - parent_proc=proc, - timestamp=event.timestamp) + self._state.send_notification_cb( + 'close_fd', fd=fd, parent_proc=proc, + timestamp=event.timestamp, cpu_id=event['cpu_id']) del proc.fds[fd] + + def _process_sched_pi_setprio(self, event): + timestamp = event.timestamp + newprio = event['newprio'] + tid = event['tid'] + + self._check_prio_changed(timestamp, tid, newprio) diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/sp.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/sp.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/sp.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/sp.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -24,13 +22,11 @@ class StateProvider: - def process_event(self, ev): - raise NotImplementedError() - - def _register_cbs(self, cbs): + def __init__(self, state, cbs): + self._state = state self._cbs = cbs - def _process_event_cb(self, ev): + def process_event(self, ev): name = ev.name if name in self._cbs: diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/statedump.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/statedump.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/statedump.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/statedump.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -28,43 +26,46 @@ class StatedumpStateProvider(sp.StateProvider): def 
__init__(self, state): - self._state = state cbs = { 'lttng_statedump_process_state': self._process_lttng_statedump_process_state, 'lttng_statedump_file_descriptor': self._process_lttng_statedump_file_descriptor } - self._register_cbs(cbs) - def process_event(self, ev): - self._process_event_cb(ev) + super().__init__(state, cbs) def _process_lttng_statedump_process_state(self, event): tid = event['tid'] pid = event['pid'] name = event['name'] + # prio is not in the payload for LTTng-modules < 2.8. Using + # get() will set it to None if the key is not found + prio = event.get('prio') + if tid not in self._state.tids: - proc = sv.Process() - proc.tid = tid - self._state.tids[tid] = proc - else: - proc = self._state.tids[tid] + self._state.tids[tid] = sv.Process(tid=tid) + + proc = self._state.tids[tid] # Even if the process got created earlier, some info might be # missing, add it now. proc.pid = pid proc.comm = name + # However don't override the prio value if we already got the + # information from sched_* events. + if proc.prio is None: + proc.prio = prio if pid != tid: # create the parent if pid not in self._state.tids: - parent = sv.Process() - parent.tid = pid - parent.pid = pid - parent.comm = name - self._state.tids[pid] = parent - else: - parent = self._state.tids[pid] + # FIXME: why is the parent's name set to that of the + # child? does that make sense? + + # tid == pid for the parent process + self._state.tids[pid] = sv.Process(tid=pid, pid=pid, comm=name) + + parent = self._state.tids[pid] # If the thread had opened FDs, they need to be assigned # to the parent. 
StatedumpStateProvider._assign_fds_to_parent(proc, parent) @@ -79,22 +80,25 @@ cloexec = event['flags'] & common.O_CLOEXEC == common.O_CLOEXEC if pid not in self._state.tids: - proc = sv.Process() - proc.pid = pid - proc.tid = pid - self._state.tids[pid] = proc - else: - proc = self._state.tids[pid] + self._state.tids[pid] = sv.Process(tid=pid, pid=pid) + + proc = self._state.tids[pid] if fd not in proc.fds: proc.fds[fd] = sv.FD(fd, filename, sv.FDType.unknown, cloexec) self._state.send_notification_cb('create_fd', fd=fd, parent_proc=proc, - timestamp=event.timestamp) + timestamp=event.timestamp, + cpu_id=event['cpu_id']) else: # just fix the filename proc.fds[fd].filename = filename + self._state.send_notification_cb('update_fd', + fd=fd, + parent_proc=proc, + timestamp=event.timestamp, + cpu_id=event['cpu_id']) @staticmethod def _assign_fds_to_parent(proc, parent): diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/sv.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/sv.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/sv.py 2015-07-13 18:00:14.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/sv.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -32,15 +30,18 @@ class Process(): - def __init__(self, tid=None, pid=None, comm=''): + def __init__(self, tid=None, pid=None, comm='', prio=None): self.tid = tid self.pid = pid self.comm = comm + self.prio = prio # indexed by fd self.fds = {} self.current_syscall = None # the process scheduled before this one self.prev_tid = None + self.last_wakeup = None + self.last_waker = None class CPU(): @@ -71,7 +72,13 @@ def process_exit(self, event): self.end_ts = event.timestamp - self.ret = event['ret'] + # On certain architectures (notably arm32), lttng-modules + # versions prior to 2.8 would erroneously trace certain + # syscalls (e.g. mmap2) without their return value. 
In this + # case, get() will simply set self.ret to None. These syscalls + # with a None return value should simply be ignored down the + # line. + self.ret = event.get('ret') self.duration = self.end_ts - self.begin_ts @classmethod @@ -134,6 +141,13 @@ self.begin_ts = begin_ts self.end_ts = None + @property + def duration(self): + if not self.end_ts or not self.begin_ts: + return None + + return self.end_ts - self.begin_ts + class HardIRQ(IRQ): def __init__(self, id, cpu_id, begin_ts): @@ -446,7 +460,7 @@ DISK_OPEN_SYSCALLS = ['open', 'openat'] # list of syscalls that open a FD on the network # (in the exit_syscall event) - NET_OPEN_SYSCALLS = ['accept', 'accept4', 'socket'] + NET_OPEN_SYSCALLS = ['socket'] # list of syscalls that can duplicate a FD DUP_OPEN_SYSCALLS = ['fcntl', 'dup', 'dup2', 'dup3'] SYNC_SYSCALLS = ['sync', 'sync_file_range', 'fsync', 'fdatasync'] diff -Nru lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/syscalls.py lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/syscalls.py --- lttnganalyses-0.3.0/lttnganalyses/linuxautomaton/syscalls.py 2015-07-13 17:48:11.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/linuxautomaton/syscalls.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# # The MIT License (MIT) # # Copyright (C) 2015 - Julien Desfossez @@ -33,11 +31,7 @@ 'syscall_exit': self._process_syscall_exit } - self._state = state - self._register_cbs(cbs) - - def process_event(self, ev): - self._process_event_cb(ev) + super().__init__(state, cbs) def _process_syscall_entry(self, event): cpu_id = event['cpu_id'] @@ -70,7 +64,8 @@ self._state.send_notification_cb('syscall_exit', proc=proc, - event=event) + event=event, + cpu_id=cpu_id) # If it's an IO Syscall, the IO state provider will take care of # clearing the current syscall, so only clear here if it's not diff -Nru lttnganalyses-0.3.0/lttnganalyses/_version.py lttnganalyses-0.4.3/lttnganalyses/_version.py --- lttnganalyses-0.3.0/lttnganalyses/_version.py 
2015-07-13 22:18:42.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses/_version.py 2016-03-08 00:52:22.000000000 +0000 @@ -11,8 +11,8 @@ { "dirty": false, "error": null, - "full-revisionid": "ca96c0d5e33ab1d91c2f451903939a75289c1e34", - "version": "0.3.0" + "full-revisionid": "6ec799a0912c32612a0651bf094ec68cec493327", + "version": "0.4.3" } ''' # END VERSION_JSON diff -Nru lttnganalyses-0.3.0/lttnganalyses.egg-info/entry_points.txt lttnganalyses-0.4.3/lttnganalyses.egg-info/entry_points.txt --- lttnganalyses-0.3.0/lttnganalyses.egg-info/entry_points.txt 2015-07-13 22:18:42.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses.egg-info/entry_points.txt 2016-03-08 00:52:22.000000000 +0000 @@ -1,13 +1,32 @@ [console_scripts] -lttng-iolatencystats = lttnganalyses.cli.io:runstats -lttng-iousagetop = lttnganalyses.cli.io:runusage lttng-cputop = lttnganalyses.cli.cputop:run +lttng-cputop-mi = lttnganalyses.cli.cputop:run_mi +lttng-iolatencyfreq = lttnganalyses.cli.io:runfreq +lttng-iolatencyfreq-mi = lttnganalyses.cli.io:runfreq_mi +lttng-iolatencystats = lttnganalyses.cli.io:runstats +lttng-iolatencystats-mi = lttnganalyses.cli.io:runstats_mi lttng-iolatencytop = lttnganalyses.cli.io:runlatencytop +lttng-iolatencytop-mi = lttnganalyses.cli.io:runlatencytop_mi +lttng-iolog = lttnganalyses.cli.io:runlog +lttng-iolog-mi = lttnganalyses.cli.io:runlog_mi +lttng-iousagetop = lttnganalyses.cli.io:runusage +lttng-iousagetop-mi = lttnganalyses.cli.io:runusage_mi +lttng-irqfreq = lttnganalyses.cli.irq:runfreq +lttng-irqfreq-mi = lttnganalyses.cli.irq:runfreq_mi +lttng-irqlog = lttnganalyses.cli.irq:runlog +lttng-irqlog-mi = lttnganalyses.cli.irq:runlog_mi lttng-irqstats = lttnganalyses.cli.irq:runstats +lttng-irqstats-mi = lttnganalyses.cli.irq:runstats_mi lttng-memtop = lttnganalyses.cli.memtop:run -lttng-irqlog = lttnganalyses.cli.irq:runlog +lttng-memtop-mi = lttnganalyses.cli.memtop:run_mi +lttng-schedfreq = lttnganalyses.cli.sched:runfreq +lttng-schedfreq-mi = 
lttnganalyses.cli.sched:runfreq_mi +lttng-schedlog = lttnganalyses.cli.sched:runlog +lttng-schedlog-mi = lttnganalyses.cli.sched:runlog_mi +lttng-schedstats = lttnganalyses.cli.sched:runstats +lttng-schedstats-mi = lttnganalyses.cli.sched:runstats_mi +lttng-schedtop = lttnganalyses.cli.sched:runtop +lttng-schedtop-mi = lttnganalyses.cli.sched:runtop_mi lttng-syscallstats = lttnganalyses.cli.syscallstats:run -lttng-irqfreq = lttnganalyses.cli.irq:runfreq -lttng-iolatencyfreq = lttnganalyses.cli.io:runfreq -lttng-iolog = lttnganalyses.cli.io:runlog +lttng-syscallstats-mi = lttnganalyses.cli.syscallstats:run_mi diff -Nru lttnganalyses-0.3.0/lttnganalyses.egg-info/PKG-INFO lttnganalyses-0.4.3/lttnganalyses.egg-info/PKG-INFO --- lttnganalyses-0.3.0/lttnganalyses.egg-info/PKG-INFO 2015-07-13 22:18:42.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses.egg-info/PKG-INFO 2016-03-08 00:52:22.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: lttnganalyses -Version: 0.3.0 +Version: 0.4.3 Summary: LTTng analyses Home-page: https://github.com/lttng/lttng-analyses Author: Julien Desfossez @@ -53,13 +53,13 @@ apt-get install -y software-properties-common (or python-software-properties on 12.04) apt-add-repository -y ppa:lttng/ppa apt-get update - apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-pip python3-lttnganalyses + apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On **Debian Sid**: .. code-block:: bash - apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-pip python3-lttnganalyses + apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On other distributions: @@ -82,7 +82,7 @@ .. 
code-block:: bash - pip3 install --upgrade https://github.com/lttng/lttng-analyses/tarball/master + pip3 install --upgrade git+git://github.com/lttng/lttng-analyses.git ============== diff -Nru lttnganalyses-0.3.0/lttnganalyses.egg-info/requires.txt lttnganalyses-0.4.3/lttnganalyses.egg-info/requires.txt --- lttnganalyses-0.3.0/lttnganalyses.egg-info/requires.txt 2015-07-13 22:18:42.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses.egg-info/requires.txt 2016-03-08 00:52:22.000000000 +0000 @@ -1,4 +1,3 @@ - [progressbar] -progressbar \ No newline at end of file +progressbar diff -Nru lttnganalyses-0.3.0/lttnganalyses.egg-info/SOURCES.txt lttnganalyses-0.4.3/lttnganalyses.egg-info/SOURCES.txt --- lttnganalyses-0.3.0/lttnganalyses.egg-info/SOURCES.txt 2015-07-13 22:18:42.000000000 +0000 +++ lttnganalyses-0.4.3/lttnganalyses.egg-info/SOURCES.txt 2016-03-08 00:52:22.000000000 +0000 @@ -1,7 +1,25 @@ +ChangeLog +LICENSE MANIFEST.in README.rst lttng-analyses-record +lttng-cputop +lttng-iolatencyfreq +lttng-iolatencystats +lttng-iolatencytop +lttng-iolog +lttng-iousagetop +lttng-irqfreq +lttng-irqlog +lttng-irqstats +lttng-memtop +lttng-schedfreq +lttng-schedlog +lttng-schedstats +lttng-schedtop +lttng-syscallstats lttng-track-process +mit-license.txt setup.cfg setup.py versioneer.py @@ -13,21 +31,28 @@ lttnganalyses.egg-info/entry_points.txt lttnganalyses.egg-info/requires.txt lttnganalyses.egg-info/top_level.txt -lttnganalyses/ascii_graph/__init__.py lttnganalyses/cli/__init__.py lttnganalyses/cli/command.py lttnganalyses/cli/cputop.py lttnganalyses/cli/io.py lttnganalyses/cli/irq.py lttnganalyses/cli/memtop.py +lttnganalyses/cli/mi.py lttnganalyses/cli/progressbar.py +lttnganalyses/cli/sched.py lttnganalyses/cli/syscallstats.py +lttnganalyses/cli/termgraph.py +lttnganalyses/common/__init__.py +lttnganalyses/common/format_utils.py +lttnganalyses/common/version_utils.py lttnganalyses/core/__init__.py lttnganalyses/core/analysis.py lttnganalyses/core/cputop.py 
lttnganalyses/core/io.py lttnganalyses/core/irq.py lttnganalyses/core/memtop.py +lttnganalyses/core/sched.py +lttnganalyses/core/stats.py lttnganalyses/core/syscalls.py lttnganalyses/linuxautomaton/__init__.py lttnganalyses/linuxautomaton/automaton.py @@ -41,4 +66,16 @@ lttnganalyses/linuxautomaton/sp.py lttnganalyses/linuxautomaton/statedump.py lttnganalyses/linuxautomaton/sv.py -lttnganalyses/linuxautomaton/syscalls.py \ No newline at end of file +lttnganalyses/linuxautomaton/syscalls.py +tests/__init__.py +tests/analysis_test.py +tests/gen_ctfwriter.py +tests/test_cputop.py +tests/test_io.py +tests/test_irq.py +tests/trace_writer.py +tests/expected/cputop.txt +tests/expected/iolatencytop.txt +tests/expected/iousagetop.txt +tests/expected/irqlog.txt +tests/expected/irqstats.txt \ No newline at end of file diff -Nru lttnganalyses-0.3.0/lttng-analyses-record lttnganalyses-0.4.3/lttng-analyses-record --- lttnganalyses-0.3.0/lttng-analyses-record 2015-07-13 21:06:05.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-analyses-record 2016-02-29 16:40:09.000000000 +0000 @@ -88,7 +88,7 @@ lttng enable-channel -k chan1 --subbuf-size=8M >/dev/null # events that always work -lttng enable-event -s $SESSION_NAME -k sched_switch,block_rq_complete,block_rq_issue,block_bio_remap,block_bio_backmerge,netif_receive_skb,net_dev_xmit,sched_process_fork,sched_process_exec,lttng_statedump_process_state,lttng_statedump_file_descriptor,lttng_statedump_block_device,mm_vmscan_wakeup_kswapd,mm_page_free,mm_page_alloc,block_dirty_buffer,irq_handler_entry,irq_handler_exit,softirq_entry,softirq_exit,softirq_raise -c chan1 >/dev/null +lttng enable-event -s $SESSION_NAME -k 
sched_switch,sched_wakeup,sched_waking,block_rq_complete,block_rq_issue,block_bio_remap,block_bio_backmerge,netif_receive_skb,net_dev_xmit,sched_process_fork,sched_process_exec,lttng_statedump_process_state,lttng_statedump_file_descriptor,lttng_statedump_block_device,mm_vmscan_wakeup_kswapd,mm_page_free,mm_page_alloc,block_dirty_buffer,irq_handler_entry,irq_handler_exit,softirq_entry,softirq_exit,softirq_raise -c chan1 >/dev/null [[ $? != 0 ]] && echo "Warning: some events were not enabled, some analyses might not be complete" # events that might fail on specific kernels and that are not mandatory diff -Nru lttnganalyses-0.3.0/lttng-cputop lttnganalyses-0.4.3/lttng-cputop --- lttnganalyses-0.3.0/lttng-cputop 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-cputop 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from lttnganalyses.cli import cputop + +if __name__ == '__main__': + cputop.run() diff -Nru lttnganalyses-0.3.0/lttng-iolatencyfreq lttnganalyses-0.4.3/lttng-iolatencyfreq --- lttnganalyses-0.3.0/lttng-iolatencyfreq 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-iolatencyfreq 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import io + + +if __name__ == '__main__': + io.runfreq() diff -Nru lttnganalyses-0.3.0/lttng-iolatencystats lttnganalyses-0.4.3/lttng-iolatencystats --- lttnganalyses-0.3.0/lttng-iolatencystats 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-iolatencystats 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import io + + +if __name__ == '__main__': + io.runstats() diff -Nru lttnganalyses-0.3.0/lttng-iolatencytop lttnganalyses-0.4.3/lttng-iolatencytop --- lttnganalyses-0.3.0/lttng-iolatencytop 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-iolatencytop 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import io + + +if __name__ == '__main__': + io.runlatencytop() diff -Nru lttnganalyses-0.3.0/lttng-iolog lttnganalyses-0.4.3/lttng-iolog --- lttnganalyses-0.3.0/lttng-iolog 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-iolog 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import io + + +if __name__ == '__main__': + io.runlog() diff -Nru lttnganalyses-0.3.0/lttng-iousagetop lttnganalyses-0.4.3/lttng-iousagetop --- lttnganalyses-0.3.0/lttng-iousagetop 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-iousagetop 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import io + + +if __name__ == '__main__': + io.runusage() diff -Nru lttnganalyses-0.3.0/lttng-irqfreq lttnganalyses-0.4.3/lttng-irqfreq --- lttnganalyses-0.3.0/lttng-irqfreq 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-irqfreq 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import irq + + +if __name__ == '__main__': + irq.runfreq() diff -Nru lttnganalyses-0.3.0/lttng-irqlog lttnganalyses-0.4.3/lttng-irqlog --- lttnganalyses-0.3.0/lttng-irqlog 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-irqlog 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import irq + + +if __name__ == '__main__': + irq.runlog() diff -Nru lttnganalyses-0.3.0/lttng-irqstats lttnganalyses-0.4.3/lttng-irqstats --- lttnganalyses-0.3.0/lttng-irqstats 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-irqstats 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import irq + + +if __name__ == '__main__': + irq.runstats() diff -Nru lttnganalyses-0.3.0/lttng-memtop lttnganalyses-0.4.3/lttng-memtop --- lttnganalyses-0.3.0/lttng-memtop 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-memtop 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import memtop + + +if __name__ == '__main__': + memtop.run() diff -Nru lttnganalyses-0.3.0/lttng-schedfreq lttnganalyses-0.4.3/lttng-schedfreq --- lttnganalyses-0.3.0/lttng-schedfreq 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-schedfreq 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import sched + +if __name__ == '__main__': + sched.runfreq() diff -Nru lttnganalyses-0.3.0/lttng-schedlog lttnganalyses-0.4.3/lttng-schedlog --- lttnganalyses-0.3.0/lttng-schedlog 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-schedlog 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import sched + +if __name__ == '__main__': + sched.runlog() diff -Nru lttnganalyses-0.3.0/lttng-schedstats lttnganalyses-0.4.3/lttng-schedstats --- lttnganalyses-0.3.0/lttng-schedstats 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-schedstats 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import sched + +if __name__ == '__main__': + sched.runstats() diff -Nru lttnganalyses-0.3.0/lttng-schedtop lttnganalyses-0.4.3/lttng-schedtop --- lttnganalyses-0.3.0/lttng-schedtop 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-schedtop 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import sched + +if __name__ == '__main__': + sched.runtop() diff -Nru lttnganalyses-0.3.0/lttng-syscallstats lttnganalyses-0.4.3/lttng-syscallstats --- lttnganalyses-0.3.0/lttng-syscallstats 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/lttng-syscallstats 2015-07-20 22:39:22.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2015 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from lttnganalyses.cli import syscallstats + + +if __name__ == '__main__': + syscallstats.run() diff -Nru lttnganalyses-0.3.0/MANIFEST.in lttnganalyses-0.4.3/MANIFEST.in --- lttnganalyses-0.3.0/MANIFEST.in 2015-07-13 22:17:46.000000000 +0000 +++ lttnganalyses-0.4.3/MANIFEST.in 2016-03-02 17:24:01.000000000 +0000 @@ -1 +1,20 @@ include versioneer.py +recursive-include tests * +include ChangeLog +include LICENSE +include mit-license.txt +include lttng-cputop +include lttng-iolatencyfreq +include lttng-iolatencystats +include lttng-iolatencytop +include lttng-iolog +include lttng-iousagetop +include lttng-irqfreq +include lttng-irqlog +include lttng-irqstats +include lttng-memtop +include lttng-schedfreq +include lttng-schedlog +include lttng-schedstats +include lttng-schedtop +include lttng-syscallstats diff -Nru lttnganalyses-0.3.0/mit-license.txt lttnganalyses-0.4.3/mit-license.txt --- lttnganalyses-0.3.0/mit-license.txt 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/mit-license.txt 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,19 @@ +Copyright (c) 2016 EfficiOS Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff -Nru lttnganalyses-0.3.0/PKG-INFO lttnganalyses-0.4.3/PKG-INFO --- lttnganalyses-0.3.0/PKG-INFO 2015-07-13 22:18:42.000000000 +0000 +++ lttnganalyses-0.4.3/PKG-INFO 2016-03-08 00:52:22.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: lttnganalyses -Version: 0.3.0 +Version: 0.4.3 Summary: LTTng analyses Home-page: https://github.com/lttng/lttng-analyses Author: Julien Desfossez @@ -53,13 +53,13 @@ apt-get install -y software-properties-common (or python-software-properties on 12.04) apt-add-repository -y ppa:lttng/ppa apt-get update - apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-pip python3-lttnganalyses + apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On **Debian Sid**: .. code-block:: bash - apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-pip python3-lttnganalyses + apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On other distributions: @@ -82,7 +82,7 @@ .. 
code-block:: bash - pip3 install --upgrade https://github.com/lttng/lttng-analyses/tarball/master + pip3 install --upgrade git+git://github.com/lttng/lttng-analyses.git ============== diff -Nru lttnganalyses-0.3.0/README.rst lttnganalyses-0.4.3/README.rst --- lttnganalyses-0.3.0/README.rst 2015-07-13 18:00:19.000000000 +0000 +++ lttnganalyses-0.4.3/README.rst 2016-02-29 16:40:09.000000000 +0000 @@ -45,13 +45,13 @@ apt-get install -y software-properties-common (or python-software-properties on 12.04) apt-add-repository -y ppa:lttng/ppa apt-get update - apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-pip python3-lttnganalyses + apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On **Debian Sid**: .. code-block:: bash - apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-pip python3-lttnganalyses + apt-get -y install lttng-tools babeltrace lttng-modules-dkms python3-babeltrace python3-progressbar python3-lttnganalyses On other distributions: @@ -74,7 +74,7 @@ .. 
code-block:: bash - pip3 install --upgrade https://github.com/lttng/lttng-analyses/tarball/master + pip3 install --upgrade git+git://github.com/lttng/lttng-analyses.git ============== diff -Nru lttnganalyses-0.3.0/setup.cfg lttnganalyses-0.4.3/setup.cfg --- lttnganalyses-0.3.0/setup.cfg 2015-07-13 22:18:42.000000000 +0000 +++ lttnganalyses-0.4.3/setup.cfg 2016-03-08 00:52:22.000000000 +0000 @@ -7,7 +7,7 @@ parentdir_prefix = lttnganalyses- [egg_info] -tag_build = -tag_date = 0 tag_svn_revision = 0 +tag_date = 0 +tag_build = diff -Nru lttnganalyses-0.3.0/setup.py lttnganalyses-0.4.3/setup.py --- lttnganalyses-0.3.0/setup.py 2015-07-13 22:17:46.000000000 +0000 +++ lttnganalyses-0.4.3/setup.py 2016-02-29 16:40:09.000000000 +0000 @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 # # Copyright (C) 2015 - Michael Jeanson # @@ -74,14 +75,15 @@ packages=[ 'lttnganalyses', + 'lttnganalyses.common', 'lttnganalyses.core', 'lttnganalyses.cli', - 'lttnganalyses.linuxautomaton', - 'lttnganalyses.ascii_graph' + 'lttnganalyses.linuxautomaton' ], entry_points={ 'console_scripts': [ + # human-readable output 'lttng-cputop = lttnganalyses.cli.cputop:run', 'lttng-iolatencyfreq = lttnganalyses.cli.io:runfreq', 'lttng-iolatencystats = lttnganalyses.cli.io:runstats', @@ -93,6 +95,27 @@ 'lttng-irqstats = lttnganalyses.cli.irq:runstats', 'lttng-memtop = lttnganalyses.cli.memtop:run', 'lttng-syscallstats = lttnganalyses.cli.syscallstats:run', + 'lttng-schedlog = lttnganalyses.cli.sched:runlog', + 'lttng-schedtop = lttnganalyses.cli.sched:runtop', + 'lttng-schedstats = lttnganalyses.cli.sched:runstats', + 'lttng-schedfreq = lttnganalyses.cli.sched:runfreq', + + # MI mode + 'lttng-cputop-mi = lttnganalyses.cli.cputop:run_mi', + 'lttng-memtop-mi = lttnganalyses.cli.memtop:run_mi', + 'lttng-syscallstats-mi = lttnganalyses.cli.syscallstats:run_mi', + 'lttng-irqfreq-mi = lttnganalyses.cli.irq:runfreq_mi', + 'lttng-irqlog-mi = lttnganalyses.cli.irq:runlog_mi', + 'lttng-irqstats-mi = 
lttnganalyses.cli.irq:runstats_mi', + 'lttng-iolatencyfreq-mi = lttnganalyses.cli.io:runfreq_mi', + 'lttng-iolatencystats-mi = lttnganalyses.cli.io:runstats_mi', + 'lttng-iolatencytop-mi = lttnganalyses.cli.io:runlatencytop_mi', + 'lttng-iolog-mi = lttnganalyses.cli.io:runlog_mi', + 'lttng-iousagetop-mi = lttnganalyses.cli.io:runusage_mi', + 'lttng-schedlog-mi = lttnganalyses.cli.sched:runlog_mi', + 'lttng-schedtop-mi = lttnganalyses.cli.sched:runtop_mi', + 'lttng-schedstats-mi = lttnganalyses.cli.sched:runstats_mi', + 'lttng-schedfreq-mi = lttnganalyses.cli.sched:runfreq_mi', ], }, @@ -103,5 +126,7 @@ extras_require={ 'progressbar': ["progressbar"] - } + }, + + test_suite='tests', ) diff -Nru lttnganalyses-0.3.0/tests/analysis_test.py lttnganalyses-0.4.3/tests/analysis_test.py --- lttnganalyses-0.3.0/tests/analysis_test.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/analysis_test.py 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,81 @@ +# The MIT License (MIT) +# +# Copyright (C) 2016 - Julien Desfossez +# Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import os +import subprocess +import unittest +from .trace_writer import TraceWriter + + +class AnalysisTest(unittest.TestCase): + COMMON_OPTIONS = '--no-progress --skip-validation --gmt' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.rm_trace = True + + def set_up_class(self): + dirname = os.path.dirname(os.path.realpath(__file__)) + self.data_path = dirname + '/expected/' + self.maxDiff = None + self.trace_writer = TraceWriter() + self.write_trace() + + def tear_down_class(self): + if self.rm_trace: + self.trace_writer.rm_trace() + + def write_trace(self): + raise NotImplementedError + + def run(self, result=None): + self.set_up_class() + super().run(result) + self.tear_down_class() + + return result + + def get_expected_output(self, test_name): + expected_path = os.path.join(self.data_path, test_name + '.txt') + with open(expected_path, 'r') as expected_file: + return expected_file.read() + + def get_cmd_output(self, exec_name, options=''): + cmd_fmt = './{} {} {} {}' + cmd = cmd_fmt.format(exec_name, self.COMMON_OPTIONS, + options, self.trace_writer.trace_root) + + return subprocess.getoutput(cmd) + + def save_test_result(self, result, test_name): + result_path = os.path.join(self.trace_writer.trace_root, test_name) + with open(result_path, 'w') as result_file: + result_file.write(result) + self.rm_trace = False + + def _assertMultiLineEqual(self, result, expected, test_name): + try: + self.assertMultiLineEqual(result, expected) + except AssertionError: + self.save_test_result(result, test_name) + raise diff -Nru lttnganalyses-0.3.0/tests/expected/cputop.txt lttnganalyses-0.4.3/tests/expected/cputop.txt --- lttnganalyses-0.3.0/tests/expected/cputop.txt 
1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/expected/cputop.txt 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,15 @@ +Timerange: [1970-01-01 00:00:01.000000000, 1970-01-01 00:00:11.000000000] +Per-TID Usage Process Migrations Priorities +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 100.00 % prog100pc-cpu5 (42) 0 [20] +████████████████████ 25.00 % prog25pc-cpu1 (30665) 0 [20] +████████████████ 20.00 % prog20pc-cpu0 (30664) 0 [20] + +Per-CPU Usage +################################################################################ +████████████████ 20.00 % CPU 0 +████████████████████ 25.00 % CPU 1 +████████████████████████████████████████████████████████████████████████████████ 100.00 % CPU 5 + + +Total CPU Usage: 48.33% diff -Nru lttnganalyses-0.3.0/tests/expected/iolatencytop.txt lttnganalyses-0.4.3/tests/expected/iolatencytop.txt --- lttnganalyses-0.3.0/tests/expected/iolatencytop.txt 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/expected/iolatencytop.txt 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,14 @@ +Timerange: [1970-01-01 00:00:01.000000000, 1970-01-01 00:00:01.024000000] + +Top system call latencies open (usec) +Begin End Name Duration (usec) Size Proc PID Filename +[00:00:01.023000000,00:00:01.024000000] open 1000.000 N/A app3 101 test/open/file (fd=42) + +Top system call latencies read (usec) +Begin End Name Duration (usec) Size Proc PID Filename +[00:00:01.008000000,00:00:01.009000000] read 1000.000 100 B app2 100 testfile (fd=3) +[00:00:01.012000000,00:00:01.013000000] read 1000.000 42 B app3 101 unknown (fd=3) + +Top system call latencies write (usec) +Begin End Name Duration (usec) Size Proc PID Filename +[00:00:01.004000000,00:00:01.005000000] write 1000.000 10 B app 99 unknown (fd=4) \ No newline at end of file diff -Nru lttnganalyses-0.3.0/tests/expected/iousagetop.txt 
lttnganalyses-0.4.3/tests/expected/iousagetop.txt --- lttnganalyses-0.3.0/tests/expected/iousagetop.txt 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/expected/iousagetop.txt 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,54 @@ +Timerange: [1970-01-01 00:00:01.000000000, 1970-01-01 00:00:01.024000000] +Per-process I/O Read Process Disk Net Unknown +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 100 B app2 (100) 0 B 0 B 100 B +█████████████████████████████████ 42 B app3 (unknown (tid=101)) 0 B 0 B 42 B + 0 B app (99) 0 B 0 B 0 B + +Per-process I/O Write Process Disk Net Unknown +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 10 B app (99) 0 B 0 B 10 B + 0 B app2 (100) 0 B 0 B 0 B + 0 B app3 (unknown (tid=101)) 0 B 0 B 0 B + +Per-file I/O Read Path +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 100 B testfile +█████████████████████████████████ 42 B unknown (app3) + +Per-file I/O Write Path +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 10 B unknown (app) + +Block I/O Read Process +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 5.00 KiB app (pid=99) + +Block I/O Write Process +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 10.00 KiB app3 (pid=unknown (tid=101)) + +Disk Requests Sector Count Disk 
+################################################################################ +████████████████████████████████████████████████████████████████████████████████ 20 sectors (8,0) +████████████████████████████████████████ 10 sectors (252,0) + +Disk Request Count Disk +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 1 requests (252,0) +████████████████████████████████████████████████████████████████████████████████ 1 requests (8,0) + +Disk Request Average Latency Disk +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 1.00 ms (252,0) +████████████████████████████████████████████████████████████████████████████████ 1.00 ms (8,0) + +Network Received Bytes Interface +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 200 B wlan0 +████████████████████████████████████████ 100 B wlan1 + +Network Sent Bytes Interface +################################################################################ +████████████████████████████████████████████████████████████████████████████████ 100 B wlan0 + 0 B wlan1 diff -Nru lttnganalyses-0.3.0/tests/expected/irqlog.txt lttnganalyses-0.4.3/tests/expected/irqlog.txt --- lttnganalyses-0.3.0/tests/expected/irqlog.txt 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/expected/irqlog.txt 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,19 @@ +Timerange: [1970-01-01 00:00:01.000000000, 1970-01-01 00:00:01.045000000] +Begin End Duration (us) CPU Type # Name +[00:00:01.007000000, 00:00:01.008000000] 1000.000 1 SoftIRQ 1 TIMER_SOFTIRQ (raised at 00:00:01.000000000) +[00:00:01.006000000, 00:00:01.009000000] 3000.000 3 SoftIRQ 1 TIMER_SOFTIRQ (raised at 00:00:01.001000000) +[00:00:01.010000000, 
00:00:01.012000000] 2000.000 1 SoftIRQ 9 RCU_SOFTIRQ (raised at 00:00:01.002000000) +[00:00:01.011000000, 00:00:01.013000000] 2000.000 3 SoftIRQ 7 SCHED_SOFTIRQ (raised at 00:00:01.005000000) +[00:00:01.014000000, 00:00:01.015000000] 1000.000 3 SoftIRQ 9 RCU_SOFTIRQ (raised at 00:00:01.004000000) +[00:00:01.016000000, 00:00:01.018000000] 2000.000 0 IRQ 41 ahci +[00:00:01.019000000, 00:00:01.020000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.017000000) +[00:00:01.021000000, 00:00:01.023000000] 2000.000 0 IRQ 41 ahci +[00:00:01.024000000, 00:00:01.025000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.022000000) +[00:00:01.026000000, 00:00:01.028000000] 2000.000 0 IRQ 41 ahci +[00:00:01.029000000, 00:00:01.030000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.027000000) +[00:00:01.031000000, 00:00:01.033000000] 2000.000 0 IRQ 41 ahci +[00:00:01.034000000, 00:00:01.035000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.032000000) +[00:00:01.036000000, 00:00:01.038000000] 2000.000 0 IRQ 41 ahci +[00:00:01.039000000, 00:00:01.040000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.037000000) +[00:00:01.041000000, 00:00:01.043000000] 2000.000 0 IRQ 41 ahci +[00:00:01.044000000, 00:00:01.045000000] 1000.000 0 SoftIRQ 4 BLOCK_SOFTIRQ (raised at 00:00:01.042000000) \ No newline at end of file diff -Nru lttnganalyses-0.3.0/tests/expected/irqstats.txt lttnganalyses-0.4.3/tests/expected/irqstats.txt --- lttnganalyses-0.3.0/tests/expected/irqstats.txt 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/expected/irqstats.txt 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,13 @@ +Timerange: [1970-01-01 00:00:01.000000000, 1970-01-01 00:00:01.045000000] +Hard IRQ Duration (us) + count min avg max stdev +----------------------------------------------------------------------------------| +41: 6 2000.000 2000.000 2000.000 0.000 | + +Soft IRQ Duration (us) Raise latency (us) + count min avg max stdev | count min 
avg max stdev +----------------------------------------------------------------------------------|------------------------------------------------------------ +1: 2 1000.000 2000.000 3000.000 1414.214 | 2 5000.000 6000.000 7000.000 1414.214 +4: 6 1000.000 1000.000 1000.000 0.000 | 6 2000.000 2000.000 2000.000 0.000 +7: 1 2000.000 2000.000 2000.000 ? | 1 6000.000 6000.000 6000.000 ? +9: 2 1000.000 1500.000 2000.000 707.107 | 2 8000.000 9000.000 10000.000 1414.214 \ No newline at end of file diff -Nru lttnganalyses-0.3.0/tests/gen_ctfwriter.py lttnganalyses-0.4.3/tests/gen_ctfwriter.py --- lttnganalyses-0.3.0/tests/gen_ctfwriter.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/gen_ctfwriter.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 +# +# The MIT License (MIT) +# +# Copyright (C) 2016 - Julien Desfossez +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# Helper tool to generate CTFWriter code from the metadata of an existing +# trace. +# It used to add code in TraceTest.py. +# Only the basic types are supported, a warning is generated if a field cannot +# be generated so it is easy to look manually at the metadata and fix it. + +import sys +import argparse + +from babeltrace import TraceCollection, CTFScope, CTFTypeId + + +def get_definition_type(field, event): + if field.type == CTFTypeId.INTEGER: + signed = '' + if field.signedness == 0: + signed = 'u' + length = field.length + print(' self.%s.add_field(self.%sint%s_type, "_%s")' % + (event.name, signed, length, field.name)) + elif field.type == CTFTypeId.ARRAY: + print(' self.%s.add_field(self.array%s_type, "_%s")' % + (event.name, field.length, field.name)) + elif field.type == CTFTypeId.STRING: + print(' self.%s.add_field(self.string_type, "_%s")' % + (event.name, field.name)) + else: + print(' # FIXME %s.%s: Unhandled type %d' % (event.name, + field.name, + field.type)) + + +def gen_define(event): + fields = [] + print(' def define_%s(self):' % (event.name)) + print(' self.%s = CTFWriter.EventClass("%s")' % + (event.name, event.name)) + for field in event.fields: + if field.scope == CTFScope.EVENT_FIELDS: + fname = field.name + fields.append(fname) + get_definition_type(field, event) + print(' self.add_event(self.%s)' % event.name) + print('') + return fields + + +def gen_write(event, fields): + f_list = None + for f in fields: + if f_list is None: + f_list = f + else: + f_list = f_list + ", %s" % (f) + print(' def write_%s(self, time_ms, cpu_id, %s):' % (event.name, + f_list)) + print(' event = CTFWriter.Event(self.%s)' % (event.name)) + print(' self.clock.time = time_ms * 1000000') + print(' self.set_int(event.payload("_cpu_id"), cpu_id)') + for field in event.fields: + if field.scope == CTFScope.EVENT_FIELDS: + fname = field.name + if field.type == CTFTypeId.INTEGER: + print(' self.set_int(event.payload("_%s"), %s)' % + (fname, fname)) + elif 
field.type == CTFTypeId.ARRAY: + print(' self.set_char_array(event.payload("_%s"), ' + '%s)' % (fname, fname)) + elif field.type == CTFTypeId.STRING: + print(' self.set_string(event.payload("_%s"), %s)' % + (fname, fname)) + else: + print(' # FIXME %s.%s: Unhandled type %d' % + (event.name, field.name, field.type)) + print(' self.stream.append_event(event)') + print(' self.stream.flush()') + print('') + + +def gen_parser(handle, args): + for h in handle.values(): + for event in h.events: + fields = gen_define(event) + gen_write(event, fields) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='CTFWriter code generator') + parser.add_argument('path', metavar="", help='Trace path') + args = parser.parse_args() + + traces = TraceCollection() + handle = traces.add_traces_recursive(args.path, "ctf") + if handle is None: + sys.exit(1) + + gen_parser(handle, args) + + for h in handle.values(): + traces.remove_trace(h) diff -Nru lttnganalyses-0.3.0/tests/__init__.py lttnganalyses-0.4.3/tests/__init__.py --- lttnganalyses-0.3.0/tests/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/__init__.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,21 @@ +# The MIT License (MIT) +# +# Copyright (C) 2016 - Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. diff -Nru lttnganalyses-0.3.0/tests/test_cputop.py lttnganalyses-0.4.3/tests/test_cputop.py --- lttnganalyses-0.3.0/tests/test_cputop.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/test_cputop.py 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,48 @@ +# The MIT License (MIT) +# +# Copyright (C) 2016 - Julien Desfossez +# Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +from .analysis_test import AnalysisTest + + +class CpuTest(AnalysisTest): + def write_trace(self): + # runs the whole time: 100% + self.trace_writer.write_sched_switch(1000, 5, 'swapper/5', + 0, 'prog100pc-cpu5', 42) + # runs for 2s alternating with swapper out every 100ms + self.trace_writer.sched_switch_50pc(1100, 5000, 0, 100, 'swapper/0', + 0, 'prog20pc-cpu0', 30664) + # runs for 2.5s alternating with swapper out every 100ms + self.trace_writer.sched_switch_50pc(5100, 10000, 1, 100, 'swapper/1', + 0, 'prog25pc-cpu1', 30665) + # switch out prog100pc-cpu5 + self.trace_writer.write_sched_switch(11000, 5, 'prog100pc-cpu5', + 42, 'swapper/5', 0) + self.trace_writer.flush() + + def test_cputop(self): + test_name = 'cputop' + expected = self.get_expected_output(test_name) + result = self.get_cmd_output('lttng-cputop') + + self._assertMultiLineEqual(result, expected, test_name) diff -Nru lttnganalyses-0.3.0/tests/test_io.py lttnganalyses-0.4.3/tests/test_io.py --- lttnganalyses-0.3.0/tests/test_io.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/test_io.py 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,79 @@ +# The MIT License (MIT) +# +# Copyright (C) 2016 - Julien Desfossez +# Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .analysis_test import AnalysisTest + + +class IoTest(AnalysisTest): + def write_trace(self): + # app (99) is known at statedump + self.trace_writer.write_lttng_statedump_process_state( + 1000, 0, 99, 99, 99, 99, 98, 98, 'app', 0, 5, 0, 5, 0) + # app2 (100) unknown at statedump has testfile, FD 3 defined at + # statedump + self.trace_writer.write_lttng_statedump_file_descriptor( + 1001, 0, 100, 3, 0, 0, 'testfile') + # app write 10 bytes to FD 4 + self.trace_writer.write_sched_switch(1002, 0, 'swapper/0', 0, 'app', + 99) + self.trace_writer.write_syscall_write(1004, 0, 1, 4, 0xabcd, 10, 10) + # app2 reads 100 bytes in FD 3 + self.trace_writer.write_sched_switch(1006, 0, 'app', 99, 'app2', 100) + self.trace_writer.write_syscall_read(1008, 0, 1, 3, 0xcafe, 100, 100) + # app3 and its FD 3 are completely unknown at statedump, tries to read + # 100 bytes from FD 3 but only gets 42 + self.trace_writer.write_sched_switch(1010, 0, 'app2', 100, 'app3', 101) + self.trace_writer.write_syscall_read(1012, 0, 1, 3, 0xcafe, 100, 42) + # block write + self.trace_writer.write_block_rq_issue(1015, 0, 264241152, 33, 10, 40, + 99, 0, 0, '', 'app') + self.trace_writer.write_block_rq_complete(1016, 0, 264241152, 33, 10, + 0, 0, 0, '') + # block read + self.trace_writer.write_block_rq_issue(1017, 0, 8388608, 33, 20, 90, + 101, 1, 0, '', 'app3') + self.trace_writer.write_block_rq_complete(1018, 0, 8388608, 33, 20, 0, + 1, 0, '') + # net xmit + self.trace_writer.write_net_dev_xmit(1020, 2, 0xff, 32, 100, 
'wlan0') + # net receive + self.trace_writer.write_netif_receive_skb(1021, 1, 0xff, 100, 'wlan1') + self.trace_writer.write_netif_receive_skb(1022, 1, 0xff, 200, 'wlan0') + # syscall open + self.trace_writer.write_syscall_open(1023, 0, 1, 'test/open/file', 0, + 0, 42) + self.trace_writer.flush() + + def test_iousagetop(self): + test_name = 'iousagetop' + expected = self.get_expected_output(test_name) + result = self.get_cmd_output('lttng-iousagetop') + + self._assertMultiLineEqual(result, expected, test_name) + + def test_iolatencytop(self): + test_name = 'iolatencytop' + expected = self.get_expected_output(test_name) + result = self.get_cmd_output('lttng-iolatencytop') + + self._assertMultiLineEqual(result, expected, test_name) diff -Nru lttnganalyses-0.3.0/tests/test_irq.py lttnganalyses-0.4.3/tests/test_irq.py --- lttnganalyses-0.3.0/tests/test_irq.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/test_irq.py 2016-03-08 00:50:39.000000000 +0000 @@ -0,0 +1,89 @@ +# The MIT License (MIT) +# +# Copyright (C) 2016 - Julien Desfossez +# Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .analysis_test import AnalysisTest + + +class IrqTest(AnalysisTest): + def write_trace(self): + self.trace_writer.write_softirq_raise(1000, 1, 1) + self.trace_writer.write_softirq_raise(1001, 3, 1) + self.trace_writer.write_softirq_raise(1002, 1, 9) + self.trace_writer.write_softirq_exit(1003, 0, 4) + self.trace_writer.write_softirq_raise(1004, 3, 9) + self.trace_writer.write_softirq_raise(1005, 3, 7) + self.trace_writer.write_softirq_entry(1006, 3, 1) + self.trace_writer.write_softirq_entry(1007, 1, 1) + self.trace_writer.write_softirq_exit(1008, 1, 1) + self.trace_writer.write_softirq_exit(1009, 3, 1) + self.trace_writer.write_softirq_entry(1010, 1, 9) + self.trace_writer.write_softirq_entry(1011, 3, 7) + self.trace_writer.write_softirq_exit(1012, 1, 9) + self.trace_writer.write_softirq_exit(1013, 3, 7) + self.trace_writer.write_softirq_entry(1014, 3, 9) + self.trace_writer.write_softirq_exit(1015, 3, 9) + self.trace_writer.write_irq_handler_entry(1016, 0, 41, 'ahci') + self.trace_writer.write_softirq_raise(1017, 0, 4) + self.trace_writer.write_irq_handler_exit(1018, 0, 41, 1) + self.trace_writer.write_softirq_entry(1019, 0, 4) + self.trace_writer.write_softirq_exit(1020, 0, 4) + self.trace_writer.write_irq_handler_entry(1021, 0, 41, 'ahci') + self.trace_writer.write_softirq_raise(1022, 0, 4) + self.trace_writer.write_irq_handler_exit(1023, 0, 41, 1) + self.trace_writer.write_softirq_entry(1024, 0, 4) + self.trace_writer.write_softirq_exit(1025, 0, 4) + self.trace_writer.write_irq_handler_entry(1026, 0, 41, 'ahci') + self.trace_writer.write_softirq_raise(1027, 0, 4) + self.trace_writer.write_irq_handler_exit(1028, 0, 41, 1) + self.trace_writer.write_softirq_entry(1029, 0, 4) + 
self.trace_writer.write_softirq_exit(1030, 0, 4) + self.trace_writer.write_irq_handler_entry(1031, 0, 41, 'ahci') + self.trace_writer.write_softirq_raise(1032, 0, 4) + self.trace_writer.write_irq_handler_exit(1033, 0, 41, 1) + self.trace_writer.write_softirq_entry(1034, 0, 4) + self.trace_writer.write_softirq_exit(1035, 0, 4) + self.trace_writer.write_irq_handler_entry(1036, 0, 41, 'ahci') + self.trace_writer.write_softirq_raise(1037, 0, 4) + self.trace_writer.write_irq_handler_exit(1038, 0, 41, 1) + self.trace_writer.write_softirq_entry(1039, 0, 4) + self.trace_writer.write_softirq_exit(1040, 0, 4) + self.trace_writer.write_irq_handler_entry(1041, 0, 41, 'ahci') + self.trace_writer.write_softirq_raise(1042, 0, 4) + self.trace_writer.write_irq_handler_exit(1043, 0, 41, 1) + self.trace_writer.write_softirq_entry(1044, 0, 4) + self.trace_writer.write_softirq_exit(1045, 0, 4) + self.trace_writer.flush() + + def test_irqstats(self): + test_name = 'irqstats' + expected = self.get_expected_output(test_name) + result = self.get_cmd_output('lttng-irqstats') + + self._assertMultiLineEqual(result, expected, test_name) + + def test_irqlog(self): + test_name = 'irqlog' + expected = self.get_expected_output(test_name) + result = self.get_cmd_output('lttng-irqlog') + + self._assertMultiLineEqual(result, expected, test_name) diff -Nru lttnganalyses-0.3.0/tests/trace_writer.py lttnganalyses-0.4.3/tests/trace_writer.py --- lttnganalyses-0.3.0/tests/trace_writer.py 1970-01-01 00:00:00.000000000 +0000 +++ lttnganalyses-0.4.3/tests/trace_writer.py 2016-02-29 16:40:09.000000000 +0000 @@ -0,0 +1,534 @@ +# The MIT License (MIT) +# +# Copyright (C) 2016 - Julien Desfossez +# Antoine Busque +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import sys +import os +import shutil +import tempfile +from babeltrace import CTFWriter, CTFStringEncoding + + +class TraceWriter(): + def __init__(self): + self._trace_root = tempfile.mkdtemp() + self.trace_path = os.path.join(self.trace_root, "kernel") + self.create_writer() + self.create_stream_class() + self.define_base_types() + self.define_events() + self.create_stream() + + @property + def trace_root(self): + return self._trace_root + + def rm_trace(self): + shutil.rmtree(self.trace_root) + + def flush(self): + self.writer.flush_metadata() + self.stream.flush() + + def create_writer(self): + self.clock = CTFWriter.Clock("A_clock") + self.clock.description = "Simple clock" + self.writer = CTFWriter.Writer(self.trace_path) + self.writer.add_clock(self.clock) + self.writer.add_environment_field("Python_version", + str(sys.version_info)) + self.writer.add_environment_field("tracer_major", 2) + self.writer.add_environment_field("tracer_minor", 8) + self.writer.add_environment_field("tracer_patchlevel", 0) + + def create_stream_class(self): + self.stream_class = CTFWriter.StreamClass("test_stream") + self.stream_class.clock = self.clock + + def define_base_types(self): + self.char8_type = 
CTFWriter.IntegerFieldDeclaration(8) + self.char8_type.signed = True + self.char8_type.encoding = CTFStringEncoding.UTF8 + self.char8_type.alignment = 8 + + self.int16_type = CTFWriter.IntegerFieldDeclaration(16) + self.int16_type.signed = True + self.int16_type.alignment = 8 + + self.uint16_type = CTFWriter.IntegerFieldDeclaration(16) + self.uint16_type.signed = False + self.uint16_type.alignment = 8 + + self.int32_type = CTFWriter.IntegerFieldDeclaration(32) + self.int32_type.signed = True + self.int32_type.alignment = 8 + + self.uint32_type = CTFWriter.IntegerFieldDeclaration(32) + self.uint32_type.signed = False + self.uint32_type.alignment = 8 + + self.int64_type = CTFWriter.IntegerFieldDeclaration(64) + self.int64_type.signed = True + self.int64_type.alignment = 8 + + self.uint64_type = CTFWriter.IntegerFieldDeclaration(64) + self.uint64_type.signed = False + self.uint64_type.alignment = 8 + + self.array16_type = CTFWriter.ArrayFieldDeclaration(self.char8_type, + 16) + + self.string_type = CTFWriter.StringFieldDeclaration() + + def add_event(self, event): + event.add_field(self.uint32_type, "_cpu_id") + self.stream_class.add_event_class(event) + + def define_sched_switch(self): + self.sched_switch = CTFWriter.EventClass("sched_switch") + self.sched_switch.add_field(self.array16_type, "_prev_comm") + self.sched_switch.add_field(self.int32_type, "_prev_tid") + self.sched_switch.add_field(self.int32_type, "_prev_prio") + self.sched_switch.add_field(self.int64_type, "_prev_state") + self.sched_switch.add_field(self.array16_type, "_next_comm") + self.sched_switch.add_field(self.int32_type, "_next_tid") + self.sched_switch.add_field(self.int32_type, "_next_prio") + self.add_event(self.sched_switch) + + def define_softirq_raise(self): + self.softirq_raise = CTFWriter.EventClass("softirq_raise") + self.softirq_raise.add_field(self.uint32_type, "_vec") + self.add_event(self.softirq_raise) + + def define_softirq_entry(self): + self.softirq_entry = 
CTFWriter.EventClass("softirq_entry") + self.softirq_entry.add_field(self.uint32_type, "_vec") + self.add_event(self.softirq_entry) + + def define_softirq_exit(self): + self.softirq_exit = CTFWriter.EventClass("softirq_exit") + self.softirq_exit.add_field(self.uint32_type, "_vec") + self.add_event(self.softirq_exit) + + def define_irq_handler_entry(self): + self.irq_handler_entry = CTFWriter.EventClass("irq_handler_entry") + self.irq_handler_entry.add_field(self.int32_type, "_irq") + self.irq_handler_entry.add_field(self.string_type, "_name") + self.add_event(self.irq_handler_entry) + + def define_irq_handler_exit(self): + self.irq_handler_exit = CTFWriter.EventClass("irq_handler_exit") + self.irq_handler_exit.add_field(self.int32_type, "_irq") + self.irq_handler_exit.add_field(self.int32_type, "_ret") + self.add_event(self.irq_handler_exit) + + def define_syscall_entry_write(self): + self.syscall_entry_write = CTFWriter.EventClass("syscall_entry_write") + self.syscall_entry_write.add_field(self.uint32_type, "_fd") + self.syscall_entry_write.add_field(self.uint64_type, "_buf") + self.syscall_entry_write.add_field(self.uint64_type, "_count") + self.add_event(self.syscall_entry_write) + + def define_syscall_exit_write(self): + self.syscall_exit_write = CTFWriter.EventClass("syscall_exit_write") + self.syscall_exit_write.add_field(self.int64_type, "_ret") + self.add_event(self.syscall_exit_write) + + def define_syscall_entry_read(self): + self.syscall_entry_read = CTFWriter.EventClass("syscall_entry_read") + self.syscall_entry_read.add_field(self.uint32_type, "_fd") + self.syscall_entry_read.add_field(self.uint64_type, "_count") + self.add_event(self.syscall_entry_read) + + def define_syscall_exit_read(self): + self.syscall_exit_read = CTFWriter.EventClass("syscall_exit_read") + self.syscall_exit_read.add_field(self.uint64_type, "_buf") + self.syscall_exit_read.add_field(self.int64_type, "_ret") + self.add_event(self.syscall_exit_read) + + def 
define_syscall_entry_open(self): + self.syscall_entry_open = CTFWriter.EventClass("syscall_entry_open") + self.syscall_entry_open.add_field(self.string_type, "_filename") + self.syscall_entry_open.add_field(self.int32_type, "_flags") + self.syscall_entry_open.add_field(self.uint16_type, "_mode") + self.add_event(self.syscall_entry_open) + + def define_syscall_exit_open(self): + self.syscall_exit_open = CTFWriter.EventClass("syscall_exit_open") + self.syscall_exit_open.add_field(self.int64_type, "_ret") + self.add_event(self.syscall_exit_open) + + def define_lttng_statedump_process_state(self): + self.lttng_statedump_process_state = CTFWriter.EventClass( + "lttng_statedump_process_state") + self.lttng_statedump_process_state.add_field(self.int32_type, "_tid") + self.lttng_statedump_process_state.add_field(self.int32_type, "_vtid") + self.lttng_statedump_process_state.add_field(self.int32_type, "_pid") + self.lttng_statedump_process_state.add_field(self.int32_type, "_vpid") + self.lttng_statedump_process_state.add_field(self.int32_type, "_ppid") + self.lttng_statedump_process_state.add_field(self.int32_type, "_vppid") + self.lttng_statedump_process_state.add_field(self.array16_type, + "_name") + self.lttng_statedump_process_state.add_field(self.int32_type, "_type") + self.lttng_statedump_process_state.add_field(self.int32_type, "_mode") + self.lttng_statedump_process_state.add_field(self.int32_type, + "_submode") + self.lttng_statedump_process_state.add_field(self.int32_type, + "_status") + self.lttng_statedump_process_state.add_field(self.int32_type, + "_ns_level") + self.add_event(self.lttng_statedump_process_state) + + def define_lttng_statedump_file_descriptor(self): + self.lttng_statedump_file_descriptor = CTFWriter.EventClass( + "lttng_statedump_file_descriptor") + self.lttng_statedump_file_descriptor.add_field(self.int32_type, "_pid") + self.lttng_statedump_file_descriptor.add_field(self.int32_type, "_fd") + 
self.lttng_statedump_file_descriptor.add_field(self.uint32_type, + "_flags") + self.lttng_statedump_file_descriptor.add_field(self.uint32_type, + "_fmode") + self.lttng_statedump_file_descriptor.add_field(self.string_type, + "_filename") + self.add_event(self.lttng_statedump_file_descriptor) + + def define_sched_wakeup(self): + self.sched_wakeup = CTFWriter.EventClass("sched_wakeup") + self.sched_wakeup.add_field(self.array16_type, "_comm") + self.sched_wakeup.add_field(self.int32_type, "_tid") + self.sched_wakeup.add_field(self.int32_type, "_prio") + self.sched_wakeup.add_field(self.int32_type, "_success") + self.sched_wakeup.add_field(self.int32_type, "_target_cpu") + self.add_event(self.sched_wakeup) + + def define_sched_waking(self): + self.sched_waking = CTFWriter.EventClass("sched_waking") + self.sched_waking.add_field(self.array16_type, "_comm") + self.sched_waking.add_field(self.int32_type, "_tid") + self.sched_waking.add_field(self.int32_type, "_prio") + self.sched_waking.add_field(self.int32_type, "_target_cpu") + self.add_event(self.sched_waking) + + def define_block_rq_complete(self): + self.block_rq_complete = CTFWriter.EventClass("block_rq_complete") + self.block_rq_complete.add_field(self.uint32_type, "_dev") + self.block_rq_complete.add_field(self.uint64_type, "_sector") + self.block_rq_complete.add_field(self.uint32_type, "_nr_sector") + self.block_rq_complete.add_field(self.int32_type, "_errors") + self.block_rq_complete.add_field(self.uint32_type, "_rwbs") + self.block_rq_complete.add_field(self.uint64_type, "__cmd_length") + self.block_rq_complete.add_field(self.array16_type, "_cmd") + self.add_event(self.block_rq_complete) + + def define_block_rq_issue(self): + self.block_rq_issue = CTFWriter.EventClass("block_rq_issue") + self.block_rq_issue.add_field(self.uint32_type, "_dev") + self.block_rq_issue.add_field(self.uint64_type, "_sector") + self.block_rq_issue.add_field(self.uint32_type, "_nr_sector") + 
self.block_rq_issue.add_field(self.uint32_type, "_bytes") + self.block_rq_issue.add_field(self.int32_type, "_tid") + self.block_rq_issue.add_field(self.uint32_type, "_rwbs") + self.block_rq_issue.add_field(self.uint64_type, "__cmd_length") + self.block_rq_issue.add_field(self.array16_type, "_cmd") + self.block_rq_issue.add_field(self.array16_type, "_comm") + self.add_event(self.block_rq_issue) + + def define_net_dev_xmit(self): + self.net_dev_xmit = CTFWriter.EventClass("net_dev_xmit") + self.net_dev_xmit.add_field(self.uint64_type, "_skbaddr") + self.net_dev_xmit.add_field(self.int32_type, "_rc") + self.net_dev_xmit.add_field(self.uint32_type, "_len") + self.net_dev_xmit.add_field(self.string_type, "_name") + self.add_event(self.net_dev_xmit) + + def define_netif_receive_skb(self): + self.netif_receive_skb = CTFWriter.EventClass("netif_receive_skb") + self.netif_receive_skb.add_field(self.uint64_type, "_skbaddr") + self.netif_receive_skb.add_field(self.uint32_type, "_len") + self.netif_receive_skb.add_field(self.string_type, "_name") + self.add_event(self.netif_receive_skb) + + def define_events(self): + self.define_sched_switch() + self.define_softirq_raise() + self.define_softirq_entry() + self.define_softirq_exit() + self.define_irq_handler_entry() + self.define_irq_handler_exit() + self.define_syscall_entry_write() + self.define_syscall_exit_write() + self.define_syscall_entry_read() + self.define_syscall_exit_read() + self.define_syscall_entry_open() + self.define_syscall_exit_open() + self.define_lttng_statedump_process_state() + self.define_lttng_statedump_file_descriptor() + self.define_sched_wakeup() + self.define_sched_waking() + self.define_block_rq_complete() + self.define_block_rq_issue() + self.define_net_dev_xmit() + self.define_netif_receive_skb() + + def create_stream(self): + self.stream = self.writer.create_stream(self.stream_class) + + def set_char_array(self, event, string): + if len(string) > 16: + string = string[0:16] + else: + string = 
"%s" % (string + "\0" * (16 - len(string))) + + for i, char in enumerate(string): + event.field(i).value = ord(char) + + def set_int(self, event, value): + event.value = value + + def set_string(self, event, value): + event.value = value + + def write_softirq_raise(self, time_ms, cpu_id, vec): + event = CTFWriter.Event(self.softirq_raise) + self.clock.time = time_ms * 1000000 + self.set_int(event.payload("_cpu_id"), cpu_id) + self.set_int(event.payload("_vec"), vec) + self.stream.append_event(event) + self.stream.flush() + + def write_softirq_entry(self, time_ms, cpu_id, vec): + event = CTFWriter.Event(self.softirq_entry) + self.clock.time = time_ms * 1000000 + self.set_int(event.payload("_cpu_id"), cpu_id) + self.set_int(event.payload("_vec"), vec) + self.stream.append_event(event) + self.stream.flush() + + def write_softirq_exit(self, time_ms, cpu_id, vec): + event = CTFWriter.Event(self.softirq_exit) + self.clock.time = time_ms * 1000000 + self.set_int(event.payload("_cpu_id"), cpu_id) + self.set_int(event.payload("_vec"), vec) + self.stream.append_event(event) + self.stream.flush() + + def write_irq_handler_entry(self, time_ms, cpu_id, irq, name): + event = CTFWriter.Event(self.irq_handler_entry) + self.clock.time = time_ms * 1000000 + self.set_int(event.payload("_cpu_id"), cpu_id) + self.set_int(event.payload("_irq"), irq) + self.set_string(event.payload("_name"), name) + self.stream.append_event(event) + self.stream.flush() + + def write_irq_handler_exit(self, time_ms, cpu_id, irq, ret): + event = CTFWriter.Event(self.irq_handler_exit) + self.clock.time = time_ms * 1000000 + self.set_int(event.payload("_cpu_id"), cpu_id) + self.set_int(event.payload("_irq"), irq) + self.set_int(event.payload("_ret"), ret) + self.stream.append_event(event) + self.stream.flush() + + def write_syscall_write(self, time_ms, cpu_id, delay, fd, buf, count, ret): + event_entry = CTFWriter.Event(self.syscall_entry_write) + self.clock.time = time_ms * 1000000 + 
        # --- continuation of write_syscall_write(): entry payload,
        # then the matching exit event delay ms later.
        self.set_int(event_entry.payload("_cpu_id"), cpu_id)
        self.set_int(event_entry.payload("_fd"), fd)
        self.set_int(event_entry.payload("_buf"), buf)
        self.set_int(event_entry.payload("_count"), count)
        self.stream.append_event(event_entry)

        event_exit = CTFWriter.Event(self.syscall_exit_write)
        self.clock.time = (time_ms + delay) * 1000000
        self.set_int(event_exit.payload("_cpu_id"), cpu_id)
        self.set_int(event_exit.payload("_ret"), ret)
        self.stream.append_event(event_exit)
        self.stream.flush()

    def write_syscall_read(self, time_ms, cpu_id, delay, fd, buf, count, ret):
        """Append a read() syscall entry/exit pair.

        Entry (fd, count) at time_ms; exit (buf, ret) at
        time_ms + delay — the buffer pointer is an exit-side field for
        read, unlike write above.
        """
        event_entry = CTFWriter.Event(self.syscall_entry_read)
        self.clock.time = time_ms * 1000000
        self.set_int(event_entry.payload("_cpu_id"), cpu_id)
        self.set_int(event_entry.payload("_fd"), fd)
        self.set_int(event_entry.payload("_count"), count)
        self.stream.append_event(event_entry)

        event_exit = CTFWriter.Event(self.syscall_exit_read)
        self.clock.time = (time_ms + delay) * 1000000
        self.set_int(event_exit.payload("_cpu_id"), cpu_id)
        self.set_int(event_exit.payload("_buf"), buf)
        self.set_int(event_exit.payload("_ret"), ret)
        self.stream.append_event(event_exit)
        self.stream.flush()

    def write_syscall_open(self, time_ms, cpu_id, delay, filename, flags,
                           mode, ret):
        """Append an open() syscall entry/exit pair.

        NOTE(review): unlike write_syscall_write/read above, this
        flushes after *each* event and reuses one local for both
        events — harmless but inconsistent with its siblings.
        """
        event = CTFWriter.Event(self.syscall_entry_open)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_string(event.payload("_filename"), filename)
        self.set_int(event.payload("_flags"), flags)
        self.set_int(event.payload("_mode"), mode)
        self.stream.append_event(event)
        self.stream.flush()

        event = CTFWriter.Event(self.syscall_exit_open)
        self.clock.time = (time_ms + delay) * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_ret"), ret)
        self.stream.append_event(event)
        self.stream.flush()

    def write_lttng_statedump_file_descriptor(self, time_ms, cpu_id, pid, fd,
                                              flags, fmode, filename):
        """Append one statedump file-descriptor event at time_ms."""
        event = 
CTFWriter.Event(self.lttng_statedump_file_descriptor)
        # --- continuation of write_lttng_statedump_file_descriptor() ---
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_pid"), pid)
        self.set_int(event.payload("_fd"), fd)
        self.set_int(event.payload("_flags"), flags)
        self.set_int(event.payload("_fmode"), fmode)
        self.set_string(event.payload("_filename"), filename)
        self.stream.append_event(event)
        self.stream.flush()

    def write_lttng_statedump_process_state(self, time_ms, cpu_id, tid, vtid,
                                            pid, vpid, ppid, vppid, name, type,
                                            mode, submode, status, ns_level):
        """Append one statedump process-state event at time_ms.

        The "type" parameter shadows the builtin; kept as-is because
        callers may pass it by keyword and it mirrors the tracepoint
        field name.
        """
        event = CTFWriter.Event(self.lttng_statedump_process_state)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_tid"), tid)
        self.set_int(event.payload("_vtid"), vtid)
        self.set_int(event.payload("_pid"), pid)
        self.set_int(event.payload("_vpid"), vpid)
        self.set_int(event.payload("_ppid"), ppid)
        self.set_int(event.payload("_vppid"), vppid)
        self.set_char_array(event.payload("_name"), name)
        self.set_int(event.payload("_type"), type)
        self.set_int(event.payload("_mode"), mode)
        self.set_int(event.payload("_submode"), submode)
        self.set_int(event.payload("_status"), status)
        self.set_int(event.payload("_ns_level"), ns_level)
        self.stream.append_event(event)
        self.stream.flush()

    def write_sched_wakeup(self, time_ms, cpu_id, comm, tid, prio, target_cpu):
        """Append one sched_wakeup event at time_ms and flush.

        NOTE(review): the "_success" field declared for this event
        class is never set here — it keeps its default value; confirm
        that is intended.
        """
        event = CTFWriter.Event(self.sched_wakeup)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_char_array(event.payload("_comm"), comm)
        self.set_int(event.payload("_tid"), tid)
        self.set_int(event.payload("_prio"), prio)
        self.set_int(event.payload("_target_cpu"), target_cpu)
        self.stream.append_event(event)
        self.stream.flush()

    def write_sched_waking(self, time_ms, cpu_id, comm, tid, prio, target_cpu):
        """Append one sched_waking event at time_ms and flush."""
        event = CTFWriter.Event(self.sched_waking)
        self.clock.time = time_ms * 1000000
        # --- continuation of write_sched_waking(): payload fields ---
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_char_array(event.payload("_comm"), comm)
        self.set_int(event.payload("_tid"), tid)
        self.set_int(event.payload("_prio"), prio)
        self.set_int(event.payload("_target_cpu"), target_cpu)
        self.stream.append_event(event)
        self.stream.flush()

    def write_block_rq_complete(self, time_ms, cpu_id, dev, sector, nr_sector,
                                errors, rwbs, _cmd_length, cmd):
        """Append one block_rq_complete event at time_ms and flush.

        rwbs is written with set_int, so it is expected as an integer
        here (the field is declared uint32), not the usual string form.
        """
        event = CTFWriter.Event(self.block_rq_complete)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_dev"), dev)
        self.set_int(event.payload("_sector"), sector)
        self.set_int(event.payload("_nr_sector"), nr_sector)
        self.set_int(event.payload("_errors"), errors)
        self.set_int(event.payload("_rwbs"), rwbs)
        self.set_int(event.payload("__cmd_length"), _cmd_length)
        self.set_char_array(event.payload("_cmd"), cmd)
        self.stream.append_event(event)
        self.stream.flush()

    def write_block_rq_issue(self, time_ms, cpu_id, dev, sector, nr_sector,
                             bytes, tid, rwbs, _cmd_length, cmd, comm):
        """Append one block_rq_issue event at time_ms and flush.

        "bytes" shadows the builtin; kept as-is to mirror the
        tracepoint field name (callers may pass it by keyword).
        """
        event = CTFWriter.Event(self.block_rq_issue)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_dev"), dev)
        self.set_int(event.payload("_sector"), sector)
        self.set_int(event.payload("_nr_sector"), nr_sector)
        self.set_int(event.payload("_bytes"), bytes)
        self.set_int(event.payload("_tid"), tid)
        self.set_int(event.payload("_rwbs"), rwbs)
        self.set_int(event.payload("__cmd_length"), _cmd_length)
        self.set_char_array(event.payload("_cmd"), cmd)
        self.set_char_array(event.payload("_comm"), comm)
        self.stream.append_event(event)
        self.stream.flush()

    def write_net_dev_xmit(self, time_ms, cpu_id, skbaddr, rc, len, name):
        """Append one net_dev_xmit event at time_ms and flush.

        "len" shadows the builtin; kept to mirror the tracepoint field
        name.
        """
        event = CTFWriter.Event(self.net_dev_xmit)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_skbaddr"), skbaddr)
self.set_int(event.payload("_rc"), rc) + self.set_int(event.payload("_len"), len) + self.set_string(event.payload("_name"), name) + self.stream.append_event(event) + self.stream.flush() + + def write_netif_receive_skb(self, time_ms, cpu_id, skbaddr, len, name): + event = CTFWriter.Event(self.netif_receive_skb) + self.clock.time = time_ms * 1000000 + self.set_int(event.payload("_cpu_id"), cpu_id) + self.set_int(event.payload("_skbaddr"), skbaddr) + self.set_int(event.payload("_len"), len) + self.set_string(event.payload("_name"), name) + self.stream.append_event(event) + self.stream.flush() + + def write_sched_switch(self, time_ms, cpu_id, prev_comm, prev_tid, + next_comm, next_tid, prev_prio=20, prev_state=1, + next_prio=20): + event = CTFWriter.Event(self.sched_switch) + self.clock.time = time_ms * 1000000 + self.set_char_array(event.payload("_prev_comm"), prev_comm) + self.set_int(event.payload("_prev_tid"), prev_tid) + self.set_int(event.payload("_prev_prio"), prev_prio) + self.set_int(event.payload("_prev_state"), prev_state) + self.set_char_array(event.payload("_next_comm"), next_comm) + self.set_int(event.payload("_next_tid"), next_tid) + self.set_int(event.payload("_next_prio"), next_prio) + self.set_int(event.payload("_cpu_id"), cpu_id) + self.stream.append_event(event) + self.stream.flush() + + def sched_switch_50pc(self, start_time_ms, end_time_ms, cpu_id, period, + comm1, tid1, comm2, tid2): + current = start_time_ms + while current < end_time_ms: + self.write_sched_switch(current, cpu_id, comm1, tid1, comm2, tid2) + current += period + self.write_sched_switch(current, cpu_id, comm2, tid2, comm1, tid1) + current += period