// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"
#include "perf-sys.h"

#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>

#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096
#define COMM_LEN		20
#define SYM_LEN			129
#define MAX_PID			1024000

static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

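/*
 * Replay model: each traced task is described by a task_desc whose
 * history is a growable array of sched_atom entries (run, sleep,
 * wakeup, migration). "perf sched replay" re-creates every task as a
 * pthread that walks its atom array and re-enacts each event.
 */
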
#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

/* task state bitmask, copied from include/linux/sched.h */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_start;
	u64			max_lat_end;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
	int			num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	struct perf_cpu		*comp_cpus;
	bool			 comp;
	struct perf_thread_map	*color_pids;
	const char		*color_pids_str;
	struct perf_cpu_map	*color_cpus;
	const char		*color_cpus_str;
	struct perf_cpu_map	*cpus;
	const char		*cpus_str;
};

struct perf_sched {
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	struct mutex	 start_work_mutex;
	struct mutex	 work_done_wait_mutex;
	int		 profile_cpu;
/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
	struct perf_cpu	 max_cpu;
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 start_time;
	u64		 cpu_usage;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 sum_runtime;
	u64		 sum_fluct;
	u64		 run_avg;
	u64		 all_runtime;
	u64		 all_count;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool force;
	bool skip_merge;
	struct perf_sched_map map;

	/* options for timehist command */
	bool		summary;
	bool		summary_only;
	bool		idle_hist;
	bool		show_callchain;
	unsigned int	max_stack;
	bool		show_cpu_visual;
	bool		show_wakeups;
	bool		show_next;
	bool		show_migrations;
	bool		show_state;
	u64		skipped_samples;
	const char	*time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
	volatile bool	thread_funcs_exit;
};

/* per thread run time data */
struct thread_runtime {
	u64 last_time;      /* time of previous sched in/out event */
	u64 dt_run;         /* run time */
	u64 dt_sleep;       /* time between CPU access by sleep (off cpu) */
	u64 dt_iowait;      /* time between CPU access by iowait (off cpu) */
	u64 dt_preempt;     /* time between CPU access by preempt (off cpu) */
	u64 dt_delay;       /* time between wakeup and sched-in */
	u64 ready_to_run;   /* time of wakeup */

	struct stats run_stats;
	u64 total_run_time;
	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;

	int last_state;

	char shortname[3];
	bool comm_changed;

	u64 migrations;
};

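/*
 * Roughly speaking, the dt_* fields partition the interval between
 * consecutive scheduling events for a thread: dt_delay is the
 * wakeup -> sched-in gap, while dt_sleep, dt_iowait and dt_preempt
 * classify off-CPU time by the state the task was switched out in.
 * This is the per-event data behind the columns "perf sched timehist"
 * prints.
 */
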
/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime	tr;
	struct thread		*last_thread;
	struct rb_root_cached	sorted_root;
	struct callchain_root	callchain;
	struct callchain_cursor	cursor;
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	/* Note: splits on 999999999 rather than NSEC_PER_SEC; harmless for the durations used here. */
	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

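/*
 * Both overheads are taken as the minimum over ten trials. burn_nsecs()
 * compensates for the run overhead directly in its loop condition (it
 * stops run_measurement_overhead early); the sleep overhead gives a
 * floor against which replay timing error can be judged.
 */
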
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

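/*
 * The wait_sem allocated above ties a wakeup atom to the wakee's most
 * recent sleep atom: during replay the sleeping thread blocks in
 * sem_wait() on it and the waker's SCHED_EVENT_WAKEUP posts it (see
 * perf_sched__process_event()), reproducing the recorded dependency.
 */
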
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose > 0)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

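/*
 * The loop above chains the tasks into a ring: task i wakes task i + 1
 * and the last task wakes the first, so every replayed thread has at
 * least one incoming wakeup even if the trace recorded none for it.
 */
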
static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(sched, atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
		default:
			BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}

static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}

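/*
 * Each replay thread opens its own PERF_COUNT_SW_TASK_CLOCK counter so
 * that get_cpu_usage_nsec_self() can read precise per-thread CPU time.
 * One file descriptor per task can exhaust RLIMIT_NOFILE on large
 * traces, hence the EMFILE path above that raises the limit when the
 * -f/--force option is given.
 */
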
static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;
	int fd;
};

static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;

	while (!sched->thread_funcs_exit) {
		ret = sem_post(&this_task->ready_for_work);
		BUG_ON(ret);
		mutex_lock(&sched->start_work_mutex);
		mutex_unlock(&sched->start_work_mutex);

		cpu_usage_0 = get_cpu_usage_nsec_self(fd);

		for (i = 0; i < this_task->nr_events; i++) {
			this_task->curr_event = i;
			perf_sched__process_event(sched, this_task->atoms[i]);
		}

		cpu_usage_1 = get_cpu_usage_nsec_self(fd);
		this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
		ret = sem_post(&this_task->work_done_sem);
		BUG_ON(ret);

		mutex_lock(&sched->work_done_wait_mutex);
		mutex_unlock(&sched->work_done_wait_mutex);
	}
	return NULL;
}

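/*
 * The lock/unlock pairs on start_work_mutex and work_done_wait_mutex
 * act as barriers: the parent holds a mutex while workers are supposed
 * to wait, so each worker's mutex_lock() blocks until the parent
 * releases the phase, and the immediate mutex_unlock() lets the other
 * workers through as well.
 */
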
static void create_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
	EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
	BUG_ON(err);
	mutex_lock(&sched->start_work_mutex);
	mutex_lock(&sched->work_done_wait_mutex);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

static void destroy_tasks(struct perf_sched *sched)
	UNLOCK_FUNCTION(sched->start_work_mutex)
	UNLOCK_FUNCTION(sched->work_done_wait_mutex)
{
	struct task_desc *task;
	unsigned long i;
	int err;

	mutex_unlock(&sched->start_work_mutex);
	mutex_unlock(&sched->work_done_wait_mutex);
	/* Get rid of threads so they won't be upset by mutex destruction */
	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		err = pthread_join(task->thread, NULL);
		BUG_ON(err);
		sem_destroy(&task->sleep_sem);
		sem_destroy(&task->ready_for_work);
		sem_destroy(&task->work_done_sem);
	}
}

static void wait_for_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	mutex_lock(&sched->work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;

	mutex_lock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

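/*
 * The running averages above use an exponential-style weighting,
 * runavg = (runavg * (repeat - 1) + sample) / repeat, so with the
 * default --repeat of 10 each new measurement contributes one tenth.
 */
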
static void run_one_test(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = evsel__strval(evsel, sample, "comm");
	const u32 pid	 = evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose > 0) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm  = evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm  = evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose > 0)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
			 child, parent);
		goto out_put;
	}

	if (verbose > 0) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("...  child: %s/%d\n", thread__comm_str(child), child->tid);
	}

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
out_put:
	thread__put(child);
	thread__put(parent);
	return 0;
}

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

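/*
 * Latency sorting is composable: each sort key requested on the command
 * line is resolved to a sort_dimension and appended to a list, and
 * thread_lat_cmp() below applies the cmp callbacks in order until one
 * of them reports a difference.
 */
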
/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));
	if (!r)
		return NULL;

	init_stats(&r->run_stats);
	thread__set_priv(thread, r);

	return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
	struct thread_runtime *tr;

	tr = thread__priv(thread);
	if (tr == NULL) {
		tr = thread__init_runtime(thread);
		if (tr == NULL)
			pr_debug("Failed to malloc memory for runtime data.\n");
	}

	return tr;
}

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}

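/*
 * rb_root_cached keeps a pointer to the leftmost (smallest) node, which
 * is why the insertion above tracks "leftmost": descending to the right
 * even once means the new node cannot be the minimum, and
 * rb_insert_color_cached() only updates the cache when it is.
 */
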
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_start = atom->wake_up_time;
		atoms->max_lat_end = timestamp;
	}
	atoms->nb_atoms++;
}

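/*
 * The delta computed above is the scheduling latency that
 * "perf sched latency" reports: the time from a task becoming runnable
 * (its wakeup, or its sched-out time if it was switched out still
 * runnable) until it actually got back on a CPU.
 */
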
static int latency_switch_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		goto out_put;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * Task came in that we have not heard about yet:
		 * add an initial atom in runnable state.
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			goto out_put;
	}
	add_sched_in_event(in_events, timestamp);
	err = 0;
out_put:
	thread__put(sched_out);
	thread__put(sched_in);
	return err;
}

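/*
 * sched:sched_stat_runtime handler: fold the reported runtime into the
 * current atom of the thread so the report can print total runtime next
 * to the latency columns.
 */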
static int latency_runtime_event(struct perf_sched *sched,
				 struct evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	const u64 runtime = evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	if (thread == NULL)
		return -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("runtime-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	add_runtime_event(atoms, runtime, timestamp);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

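/*
 * sched:sched_wakeup handler: mark the wakee's current atom as waiting
 * for a CPU and record the wakeup timestamp that add_sched_in_event()
 * later turns into a latency sample.
 */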
static int latency_wakeup_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;
	int err = -1;

	wakee = machine__findnew_thread(machine, -1, pid);
	if (wakee == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * A wakeup is not guaranteed to find the task off the run queue:
	 * it may hit a task that is already runnable, where it merely
	 * flips ->state to TASK_RUNNING. So do not set ->wake_up_time for
	 * a task that is still on the run queue.
	 *
	 * You WILL be missing events if you've recorded only one CPU, or
	 * are looking at only one CPU, so don't skip in that case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		goto out_ok;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		goto out_ok;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
out_ok:
	err = 0;
out_put:
	thread__put(wakee);
	return err;
}

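/*
 * sched:sched_migrate_task handler: when profiling a single CPU, a task
 * migrated onto it may show up without prior history, so synthesize an
 * atom with all of its timestamps set to the migration time.
 */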
static int latency_migrate_task_event(struct perf_sched *sched,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;
	int err = -1;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			goto out_put;
		register_pid(sched, migrant->tid, thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;
	err = 0;
out_put:
	thread__put(migrant);
	return err;
}

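/*
 * Print one row of the latency report for a thread (or a set of merged
 * threads). Illustrative shape of a row, values made up:
 *
 *   bash:1234       |  12.345 ms |       42 | avg:   0.123 ms | max:   1.234 ms | ...
 */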
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;
	char max_lat_start[32], max_lat_end[32];

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count   += work_list->nb_atoms;

	if (work_list->num_merged > 1)
		ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
	else
		ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
	timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
	       (double)work_list->total_runtime / NSEC_PER_MSEC,
	       work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
	       (double)work_list->max_lat / NSEC_PER_MSEC,
	       max_lat_start, max_lat_end);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread == r->thread)
		return 0;
	if (l->thread->tid < r->thread->tid)
		return -1;
	if (l->thread->tid > r->thread->tid)
		return 1;
	return (int)(l->thread - r->thread);
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

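/*
 * Register one sort dimension by name for the latency report; each token
 * of the -s/--sort key list is looked up in available_sorts[] below
 * (pid, avg, max, switch, runtime), e.g. "-s max,runtime" (illustrative).
 */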
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp  = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp  = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

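/*
 * Drain the raw atom tree and re-insert every entry into sorted_atom_root
 * ordered by the configured sort dimensions, then do the same for the
 * merged-thread tree.
 */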
static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;
	struct rb_root_cached *root = &sched->atom_root;
again:
	for (;;) {
		struct work_atoms *data;
		node = rb_first_cached(root);
		if (!node)
			break;

		rb_erase_cached(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;
		goto again;
	}
}

static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}

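/*
 * 'perf sched map' overloads the per-thread private pointer to carry a
 * single flag: whether this thread's PID was selected for coloring.
 */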
union map_priv {
	void	*ptr;
	bool	 color;
};

static bool thread__has_color(struct thread *thread)
{
	union map_priv priv = {
		.ptr = thread__priv(thread),
	};

	return priv.color;
}

static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	union map_priv priv = {
		.color = false,
	};

	if (!sched->map.color_pids || !thread || thread__priv(thread))
		return thread;

	if (thread_map__has(sched->map.color_pids, tid))
		priv.color = true;

	thread__set_priv(thread, priv.ptr);
	return thread;
}

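/*
 * sched:sched_switch handler for 'perf sched map': print one row per
 * switch with a column per CPU, a two-character shortname per task
 * ('.' for idle) and '*' marking the CPU where this switch happened.
 * Illustrative row, shortnames and times made up:
 *
 *   *A0  .   B1     123456.789012 secs A0 => bash:1234
 */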
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	struct thread_runtime *tr;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int i;
	struct perf_cpu this_cpu = {
		.cpu = sample->cpu,
	};
	int cpus_nr;
	bool new_cpu = false;
	const char *color = PERF_COLOR_NORMAL;
	char stimestamp[32];

	BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);

	if (this_cpu.cpu > sched->max_cpu.cpu)
		sched->max_cpu = this_cpu;

	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
			new_cpu = true;
		}
	} else
		cpus_nr = sched->max_cpu.cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
	sched->cpu_last_switched[this_cpu.cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
	if (sched_in == NULL)
		return -1;

	tr = thread__get_runtime(sched_in);
	if (tr == NULL) {
		thread__put(sched_in);
		return -1;
	}

	sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);

	printf("  ");

	new_shortname = 0;
	if (!tr->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			tr->shortname[0] = '.';
			tr->shortname[1] = ' ';
		} else {
			tr->shortname[0] = sched->next_shortname1;
			tr->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (i = 0; i < cpus_nr; i++) {
		struct perf_cpu cpu = {
			.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
		};
		struct thread *curr_thread = sched->curr_thread[cpu.cpu];
		struct thread_runtime *curr_tr;
		const char *pid_color = color;
		const char *cpu_color = color;

		if (curr_thread && thread__has_color(curr_thread))
			pid_color = COLOR_PIDS;

		if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu))
			continue;

		if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu.cpu != this_cpu.cpu)
			color_fprintf(stdout, color, " ");
		else
			color_fprintf(stdout, cpu_color, "*");

		if (sched->curr_thread[cpu.cpu]) {
			curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
			if (curr_tr == NULL) {
				thread__put(sched_in);
				return -1;
			}
			color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
		} else
			color_fprintf(stdout, color, "   ");
	}

	if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
		goto out;

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, "  %12s secs ", stimestamp);
	if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
			      tr->shortname, thread__comm_str(sched_in), sched_in->tid);
		tr->comm_changed = false;
	}

	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu);

out:
	color_fprintf(stdout, color, "\n");

	thread__put(sched_in);

	return 0;
}

static int process_sched_switch_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}

static int process_sched_runtime_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}

static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

	return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}

static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	struct thread *thread;
	struct thread_runtime *tr;
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	if (err)
		return err;

	thread = machine__find_thread(machine, sample->pid, sample->tid);
	if (!thread) {
		pr_err("Internal error: can't find thread\n");
		return -1;
	}

	tr = thread__get_runtime(thread);
	if (tr == NULL) {
		thread__put(thread);
		return -1;
	}

	tr->comm_changed = true;
	thread__put(thread);

	return 0;
}

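/*
 * Open the recorded perf data file, wire the sched tracepoints to the
 * handlers above, process all events and collect basic event statistics.
 */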
static int perf_sched__read_events(struct perf_sched *sched)
{
	const struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};
	int rc = -1;

	session = perf_session__new(&data, &sched->tool);
	if (IS_ERR(session)) {
		pr_debug("Error creating perf session");
		return PTR_ERR(session);
	}

	symbol__init(&session->header.env);

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		sched->nr_events      = session->evlist->stats.nr_events[0];
		sched->nr_lost_events = session->evlist->stats.total_lost;
		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
	}

	rc = 0;
out_delete:
	perf_session__delete(session);
	return rc;
}

/*
 * scheduling times are printed as msec.usec, e.g. 1234567 nsecs
 * prints as "1.234"
 */
static inline void print_sched_time(unsigned long long nsecs, int width)
{
	unsigned long msecs;
	unsigned long usecs;

	msecs  = nsecs / NSEC_PER_MSEC;
	nsecs -= msecs * NSEC_PER_MSEC;
	usecs  = nsecs / NSEC_PER_USEC;
	printf("%*lu.%03lu ", width, msecs, usecs);
}

/*
 * returns runtime data for event, allocating memory for it the
 * first time it is used.
 */
static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
{
	struct evsel_runtime *r = evsel->priv;

	if (r == NULL) {
		r = zalloc(sizeof(struct evsel_runtime));
		evsel->priv = r;
	}

	return r;
}

/*
 * save last time event was seen per cpu
 */
static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
{
	struct evsel_runtime *r = evsel__get_runtime(evsel);

	if (r == NULL)
		return;

	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
		int i, n = __roundup_pow_of_two(cpu+1);
		void *p = r->last_time;

		p = realloc(r->last_time, n * sizeof(u64));
		if (!p)
			return;

		r->last_time = p;
		for (i = r->ncpu; i < n; ++i)
			r->last_time[i] = (u64) 0;

		r->ncpu = n;
	}

	r->last_time[cpu] = timestamp;
}

/* returns last time this event was seen on the given cpu */
static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
{
	struct evsel_runtime *r = evsel__get_runtime(evsel);

	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
		return 0;

	return r->last_time[cpu];
}

static int comm_width = 30;

static char *timehist_get_commstr(struct thread *thread)
{
	static char str[32];
	const char *comm = thread__comm_str(thread);
	pid_t tid = thread->tid;
	pid_t pid = thread->pid_;
	int n;

	if (pid == 0)
		n = scnprintf(str, sizeof(str), "%s", comm);

	else if (tid != pid)
		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);

	else
		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);

	if (n > comm_width)
		comm_width = n;

	return str;
}

static void timehist_header(struct perf_sched *sched)
{
	u32 ncpus = sched->max_cpu.cpu + 1;
	u32 i, j;

	printf("%15s %6s ", "time", "cpu");

	if (sched->show_cpu_visual) {
		printf(" ");
		for (i = 0, j = 0; i < ncpus; ++i) {
			printf("%x", j++);
			if (j > 15)
				j = 0;
		}
		printf(" ");
	}

	printf(" %-*s %9s %9s %9s", comm_width,
	       "task name", "wait time", "sch delay", "run time");

	if (sched->show_state)
		printf(" %s", "state");

	printf("\n");

	/*
	 * units row
	 */
	printf("%15s %-6s ", "", "");

	if (sched->show_cpu_visual)
		printf(" %*s ", ncpus, "");

	printf(" %-*s %9s %9s %9s", comm_width,
	       "[tid/pid]", "(msec)", "(msec)", "(msec)");

	if (sched->show_state)
		printf(" %5s", "");

	printf("\n");

	/*
	 * separator
	 */
	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);

	if (sched->show_cpu_visual)
		printf(" %.*s ", ncpus, graph_dotted_line);

	printf(" %.*s %.9s %.9s %.9s", comm_width,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
	       graph_dotted_line);

	if (sched->show_state)
		printf(" %.5s", graph_dotted_line);

	printf("\n");
}

static char task_state_char(struct thread *thread, int state)
{
	static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
	unsigned bit = state ? ffs(state) : 0;

	/* 'I' for idle */
	if (thread->tid == 0)
		return 'I';

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

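/*
 * Print one 'perf sched timehist' row: timestamp, CPU, the optional
 * per-CPU visual column, task name, wait/sched-delay/run times and,
 * depending on the options, state, next task and callchain.
 * Illustrative row, values made up:
 *
 *     2134.897396 [0002]  bash[1234]      0.024  0.011  0.932
 */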
static void timehist_print_sample(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct addr_location *al,
				  struct thread *thread,
				  u64 t, int state)
{
	struct thread_runtime *tr = thread__priv(thread);
	const char *next_comm = evsel__strval(evsel, sample, "next_comm");
	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
	u32 max_cpus = sched->max_cpu.cpu + 1;
	char tstr[64];
	char nstr[30];
	u64 wait_time;

	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
		return;

	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		u32 i;
		char c;

		printf(" ");
		for (i = 0; i < max_cpus; ++i) {
			/* flag idle times with 'i'; others are sched events */
			if (i == sample->cpu)
				c = (thread->tid == 0) ? 'i' : 's';
			else
				c = ' ';
			printf("%c", c);
		}
		printf(" ");
	}

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
	print_sched_time(wait_time, 6);

	print_sched_time(tr->dt_delay, 6);
	print_sched_time(tr->dt_run, 6);

	if (sched->show_state)
		printf(" %5c ", task_state_char(thread, state));

	if (sched->show_next) {
		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
		printf(" %-*s", comm_width, nstr);
	}

	if (sched->show_wakeups && !sched->show_next)
		printf(" %-*s", comm_width, "");

	if (thread->tid == 0)
		goto out;

	if (sched->show_callchain)
		printf(" ");

	sample__fprintf_sym(sample, al, 0,
			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
			    EVSEL__PRINT_CALLCHAIN_ARROW |
			    EVSEL__PRINT_SKIP_IGNORED,
			    &callchain_cursor, symbol_conf.bt_stop_list, stdout);

out:
	printf("\n");
}

/*
 * Explanation of delta-time stats:
 *
 *            t = time of current schedule out event
 *        tprev = time of previous sched out event
 *                also time of schedule-in event for current task
 *    last_time = time of last sched change event for current task
 *                (i.e, time process was last scheduled out)
 * ready_to_run = time of wakeup for current task
 *
 * -----|------------|------------|------------|------
 *    last         ready        tprev          t
 *    time         to run
 *
 *      |-------- dt_wait --------|
 *      |- dt_delay -|-- dt_run --|
 *
 *     dt_run = run time of current task
 *    dt_wait = time between last schedule out event for task and tprev
 *              represents time spent off the cpu
 *   dt_delay = time between wakeup and schedule-in of task
 */

static void timehist_update_runtime_stats(struct thread_runtime *r,
					  u64 t, u64 tprev)
{
	r->dt_delay   = 0;
	r->dt_sleep   = 0;
	r->dt_iowait  = 0;
	r->dt_preempt = 0;
	r->dt_run     = 0;

	if (tprev) {
		r->dt_run = t - tprev;
		if (r->ready_to_run) {
			if (r->ready_to_run > tprev)
				pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
			else
				r->dt_delay = tprev - r->ready_to_run;
		}

		if (r->last_time > tprev)
			pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
		else if (r->last_time) {
			u64 dt_wait = tprev - r->last_time;

			if (r->last_state == TASK_RUNNING)
				r->dt_preempt = dt_wait;
			else if (r->last_state == TASK_UNINTERRUPTIBLE)
				r->dt_iowait = dt_wait;
			else
				r->dt_sleep = dt_wait;
		}
	}

	update_stats(&r->run_stats, r->dt_run);

	r->total_run_time     += r->dt_run;
	r->total_delay_time   += r->dt_delay;
	r->total_sleep_time   += r->dt_sleep;
	r->total_iowait_time  += r->dt_iowait;
	r->total_preempt_time += r->dt_preempt;
}

static bool is_idle_sample(struct perf_sample *sample,
			   struct evsel *evsel)
{
	/* pid 0 == swapper == idle task */
	if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
		return evsel__intval(evsel, sample, "prev_pid") == 0;

	return sample->pid == 0;
}

static void save_task_callchain(struct perf_sched *sched,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct callchain_cursor *cursor = &callchain_cursor;
	struct thread *thread;

	/* want main thread for process - has maps */
	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
	if (thread == NULL) {
		pr_debug("Failed to get thread for pid %d.\n", sample->pid);
		return;
	}

	if (!sched->show_callchain || sample->callchain == NULL)
		return;

	if (thread__resolve_callchain(thread, cursor, evsel, sample,
				      NULL, NULL, sched->max_stack + 2) != 0) {
		if (verbose > 0)
			pr_err("Failed to resolve callchain. Skipping\n");

		return;
	}

	callchain_cursor_commit(cursor);

	while (true) {
		struct callchain_cursor_node *node;
		struct symbol *sym;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		sym = node->ms.sym;
		if (sym) {
			if (!strcmp(sym->name, "schedule") ||
			    !strcmp(sym->name, "__schedule") ||
			    !strcmp(sym->name, "preempt_schedule"))
				sym->ignore = 1;
		}

		callchain_cursor_advance(cursor);
	}
}

static int init_idle_thread(struct thread *thread)
{
	struct idle_thread_runtime *itr;

	thread__set_comm(thread, idle_comm, 0);

	itr = zalloc(sizeof(*itr));
	if (itr == NULL)
		return -ENOMEM;

	init_stats(&itr->tr.run_stats);
	callchain_init(&itr->callchain);
	callchain_cursor_reset(&itr->cursor);
	thread__set_priv(thread, itr);

	return 0;
}

/*
 * Track idle stats per cpu by maintaining a local thread
 * struct for the idle task on each cpu.
 */
static int init_idle_threads(int ncpu)
{
	int i, ret;

	idle_threads = zalloc(ncpu * sizeof(struct thread *));
	if (!idle_threads)
		return -ENOMEM;

	idle_max_cpu = ncpu;

	/* allocate the actual thread struct if needed */
	for (i = 0; i < ncpu; ++i) {
		idle_threads[i] = thread__new(0, 0);
		if (idle_threads[i] == NULL)
			return -ENOMEM;

		ret = init_idle_thread(idle_threads[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void free_idle_threads(void)
{
	int i;

	if (idle_threads == NULL)
		return;

	for (i = 0; i < idle_max_cpu; ++i) {
		if (idle_threads[i])
			thread__delete(idle_threads[i]);
	}

	free(idle_threads);
}

static struct thread *get_idle_thread(int cpu)
{
	/*
	 * expand/allocate array of pointers to local thread
	 * structs if needed
	 */
	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
		int i, j = __roundup_pow_of_two(cpu+1);
		void *p;

		p = realloc(idle_threads, j * sizeof(struct thread *));
		if (!p)
			return NULL;

		idle_threads = (struct thread **) p;
		for (i = idle_max_cpu; i < j; ++i)
			idle_threads[i] = NULL;

		idle_max_cpu = j;
	}

	/* allocate a new thread struct if needed */
	if (idle_threads[cpu] == NULL) {
		idle_threads[cpu] = thread__new(0, 0);

		if (idle_threads[cpu]) {
			if (init_idle_thread(idle_threads[cpu]) < 0)
				return NULL;
		}
	}

	return idle_threads[cpu];
}

static void save_idle_callchain(struct perf_sched *sched,
				struct idle_thread_runtime *itr,
				struct perf_sample *sample)
{
	if (!sched->show_callchain || sample->callchain == NULL)
		return;

	callchain_cursor__copy(&itr->cursor, &callchain_cursor);
}

David Ahern6c973c92016-11-16 15:06:32 +09002327static struct thread *timehist_get_thread(struct perf_sched *sched,
2328 struct perf_sample *sample,
David Ahern49394a22016-11-16 15:06:29 +09002329 struct machine *machine,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002330 struct evsel *evsel)
David Ahern49394a22016-11-16 15:06:29 +09002331{
2332 struct thread *thread;
2333
Namhyung Kim96039c72016-12-08 23:47:50 +09002334 if (is_idle_sample(sample, evsel)) {
David Ahern49394a22016-11-16 15:06:29 +09002335 thread = get_idle_thread(sample->cpu);
2336 if (thread == NULL)
2337 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2338
2339 } else {
Namhyung Kim5d92d962016-12-06 12:40:03 +09002340 /* there were samples with tid 0 but non-zero pid */
2341 thread = machine__findnew_thread(machine, sample->pid,
2342 sample->tid ?: sample->pid);
David Ahern49394a22016-11-16 15:06:29 +09002343 if (thread == NULL) {
2344 pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2345 sample->tid);
2346 }
Namhyung Kim96039c72016-12-08 23:47:50 +09002347
2348 save_task_callchain(sched, sample, evsel, machine);
Namhyung Kim699b5b92016-12-08 23:47:52 +09002349 if (sched->idle_hist) {
2350 struct thread *idle;
2351 struct idle_thread_runtime *itr;
2352
2353 idle = get_idle_thread(sample->cpu);
2354 if (idle == NULL) {
2355 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2356 return NULL;
2357 }
2358
2359 itr = thread__priv(idle);
2360 if (itr == NULL)
2361 return NULL;
2362
2363 itr->last_thread = thread;
2364
2365			/* copy task callchain when entering idle */
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002366 if (evsel__intval(evsel, sample, "next_pid") == 0)
Arnaldo Carvalho de Melo4c50563d2018-05-28 16:07:56 -03002367 save_idle_callchain(sched, itr, sample);
Namhyung Kim699b5b92016-12-08 23:47:52 +09002368 }
David Ahern49394a22016-11-16 15:06:29 +09002369 }
2370
2371 return thread;
2372}
2373
David Ahern52df1382016-11-16 15:06:30 +09002374static bool timehist_skip_sample(struct perf_sched *sched,
Namhyung Kima4b2b6f2016-12-08 23:47:53 +09002375 struct thread *thread,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002376 struct evsel *evsel,
Namhyung Kima4b2b6f2016-12-08 23:47:53 +09002377 struct perf_sample *sample)
David Ahern49394a22016-11-16 15:06:29 +09002378{
2379 bool rc = false;
2380
David Ahern52df1382016-11-16 15:06:30 +09002381 if (thread__is_filtered(thread)) {
David Ahern49394a22016-11-16 15:06:29 +09002382 rc = true;
David Ahern52df1382016-11-16 15:06:30 +09002383 sched->skipped_samples++;
2384 }
David Ahern49394a22016-11-16 15:06:29 +09002385
Namhyung Kima4b2b6f2016-12-08 23:47:53 +09002386 if (sched->idle_hist) {
Arnaldo Carvalho de Melo8ab2e962020-04-29 16:07:09 -03002387 if (strcmp(evsel__name(evsel), "sched:sched_switch"))
Namhyung Kima4b2b6f2016-12-08 23:47:53 +09002388 rc = true;
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002389 else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2390 evsel__intval(evsel, sample, "next_pid") != 0)
Namhyung Kima4b2b6f2016-12-08 23:47:53 +09002391 rc = true;
2392 }
2393
David Ahern49394a22016-11-16 15:06:29 +09002394 return rc;
2395}
2396
David Ahernfc1469f2016-11-16 15:06:31 +09002397static void timehist_print_wakeup_event(struct perf_sched *sched,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002398 struct evsel *evsel,
David Ahernfc1469f2016-11-16 15:06:31 +09002399 struct perf_sample *sample,
2400 struct machine *machine,
2401 struct thread *awakened)
2402{
2403 struct thread *thread;
2404 char tstr[64];
2405
2406 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2407 if (thread == NULL)
2408 return;
2409
2410	/* show wakeup unless both the wakee and the waker are filtered */
Namhyung Kima4b2b6f2016-12-08 23:47:53 +09002411 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2412 timehist_skip_sample(sched, awakened, evsel, sample)) {
David Ahernfc1469f2016-11-16 15:06:31 +09002413 return;
2414 }
2415
2416 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2417 printf("%15s [%04d] ", tstr, sample->cpu);
David Aherna407b062016-11-16 15:06:33 +09002418 if (sched->show_cpu_visual)
Ian Rogers6d188042022-01-04 22:13:51 -08002419 printf(" %*s ", sched->max_cpu.cpu + 1, "");
David Ahernfc1469f2016-11-16 15:06:31 +09002420
2421 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2422
2423 /* dt spacer */
2424 printf(" %9s %9s %9s ", "", "", "");
2425
2426 printf("awakened: %s", timehist_get_commstr(awakened));
2427
2428 printf("\n");
2429}
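
/*
 * Example (illustrative values): with default widths and no --cpu-visual,
 * a wakeup line printed here looks roughly like:
 *
 *	2066.105009 [0001]  perf[8269]  ...  awakened: migration/1[17]
 *
 * where "..." stands for the three empty dt spacer columns.
 */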
2430
David Ahernd566a9c2020-08-07 10:48:44 -06002431static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
2432 union perf_event *event __maybe_unused,
2433 struct evsel *evsel __maybe_unused,
2434 struct perf_sample *sample __maybe_unused,
2435 struct machine *machine __maybe_unused)
2436{
2437 return 0;
2438}
2439
David Ahernfc1469f2016-11-16 15:06:31 +09002440static int timehist_sched_wakeup_event(struct perf_tool *tool,
David Ahern49394a22016-11-16 15:06:29 +09002441 union perf_event *event __maybe_unused,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002442 struct evsel *evsel,
David Ahern49394a22016-11-16 15:06:29 +09002443 struct perf_sample *sample,
2444 struct machine *machine)
2445{
David Ahernfc1469f2016-11-16 15:06:31 +09002446 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
David Ahern49394a22016-11-16 15:06:29 +09002447 struct thread *thread;
2448 struct thread_runtime *tr = NULL;
2449	/* want the pid of the awakened task, not the pid in the sample */
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002450 const u32 pid = evsel__intval(evsel, sample, "pid");
David Ahern49394a22016-11-16 15:06:29 +09002451
2452 thread = machine__findnew_thread(machine, 0, pid);
2453 if (thread == NULL)
2454 return -1;
2455
2456 tr = thread__get_runtime(thread);
2457 if (tr == NULL)
2458 return -1;
2459
2460 if (tr->ready_to_run == 0)
2461 tr->ready_to_run = sample->time;
2462
David Ahernfc1469f2016-11-16 15:06:31 +09002463 /* show wakeups if requested */
David Ahern853b7402016-11-29 10:15:44 -07002464 if (sched->show_wakeups &&
2465 !perf_time__skip_sample(&sched->ptime, sample->time))
Namhyung Kima4b2b6f2016-12-08 23:47:53 +09002466 timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
David Ahernfc1469f2016-11-16 15:06:31 +09002467
David Ahern49394a22016-11-16 15:06:29 +09002468 return 0;
2469}
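
/*
 * Only the first wakeup after a sched-out is kept: ready_to_run is left
 * untouched while non-zero and cleared again at the next sched switch,
 * so it marks the start of the wait that feeds the delay accounting.
 */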
2470
David Ahern350f54f2016-11-25 09:28:41 -07002471static void timehist_print_migration_event(struct perf_sched *sched,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002472 struct evsel *evsel,
David Ahern350f54f2016-11-25 09:28:41 -07002473 struct perf_sample *sample,
2474 struct machine *machine,
2475 struct thread *migrated)
2476{
2477 struct thread *thread;
2478 char tstr[64];
Ian Rogers6d188042022-01-04 22:13:51 -08002479 u32 max_cpus;
David Ahern350f54f2016-11-25 09:28:41 -07002480 u32 ocpu, dcpu;
2481
2482 if (sched->summary_only)
2483 return;
2484
Ian Rogers6d188042022-01-04 22:13:51 -08002485 max_cpus = sched->max_cpu.cpu + 1;
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002486 ocpu = evsel__intval(evsel, sample, "orig_cpu");
2487 dcpu = evsel__intval(evsel, sample, "dest_cpu");
David Ahern350f54f2016-11-25 09:28:41 -07002488
2489 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2490 if (thread == NULL)
2491 return;
2492
Namhyung Kima4b2b6f2016-12-08 23:47:53 +09002493 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2494 timehist_skip_sample(sched, migrated, evsel, sample)) {
David Ahern350f54f2016-11-25 09:28:41 -07002495 return;
2496 }
2497
2498 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2499 printf("%15s [%04d] ", tstr, sample->cpu);
2500
2501 if (sched->show_cpu_visual) {
2502 u32 i;
2503 char c;
2504
2505 printf(" ");
2506 for (i = 0; i < max_cpus; ++i) {
2507 c = (i == sample->cpu) ? 'm' : ' ';
2508 printf("%c", c);
2509 }
2510 printf(" ");
2511 }
2512
2513 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2514
2515 /* dt spacer */
2516 printf(" %9s %9s %9s ", "", "", "");
2517
2518 printf("migrated: %s", timehist_get_commstr(migrated));
2519 printf(" cpu %d => %d", ocpu, dcpu);
2520
2521 printf("\n");
2522}
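
/*
 * Example (illustrative values): a migration line printed here looks
 * roughly like:
 *
 *	2066.107438 [0002]  kworker/2:1[87]  ...  migrated: stress[8270] cpu 0 => 2
 */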
2523
2524static int timehist_migrate_task_event(struct perf_tool *tool,
2525 union perf_event *event __maybe_unused,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002526 struct evsel *evsel,
David Ahern350f54f2016-11-25 09:28:41 -07002527 struct perf_sample *sample,
2528 struct machine *machine)
2529{
2530 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2531 struct thread *thread;
2532 struct thread_runtime *tr = NULL;
2533	/* want the pid of the migrated task, not the pid in the sample */
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002534 const u32 pid = evsel__intval(evsel, sample, "pid");
David Ahern350f54f2016-11-25 09:28:41 -07002535
2536 thread = machine__findnew_thread(machine, 0, pid);
2537 if (thread == NULL)
2538 return -1;
2539
2540 tr = thread__get_runtime(thread);
2541 if (tr == NULL)
2542 return -1;
2543
2544 tr->migrations++;
2545
2546 /* show migrations if requested */
2547 timehist_print_migration_event(sched, evsel, sample, machine, thread);
2548
2549 return 0;
2550}
2551
David Ahern52df1382016-11-16 15:06:30 +09002552static int timehist_sched_change_event(struct perf_tool *tool,
David Ahern49394a22016-11-16 15:06:29 +09002553 union perf_event *event,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002554 struct evsel *evsel,
David Ahern49394a22016-11-16 15:06:29 +09002555 struct perf_sample *sample,
2556 struct machine *machine)
2557{
David Ahernfc1469f2016-11-16 15:06:31 +09002558 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
David Ahern853b7402016-11-29 10:15:44 -07002559 struct perf_time_interval *ptime = &sched->ptime;
David Ahern49394a22016-11-16 15:06:29 +09002560 struct addr_location al;
2561 struct thread *thread;
2562 struct thread_runtime *tr = NULL;
David Ahern853b7402016-11-29 10:15:44 -07002563 u64 tprev, t = sample->time;
David Ahern49394a22016-11-16 15:06:29 +09002564 int rc = 0;
Arnaldo Carvalho de Meloefc0cdc2020-04-29 16:26:57 -03002565 int state = evsel__intval(evsel, sample, "prev_state");
David Ahern49394a22016-11-16 15:06:29 +09002566
2567 if (machine__resolve(machine, &al, sample) < 0) {
2568 pr_err("problem processing %d event. skipping it\n",
2569 event->header.type);
2570 rc = -1;
2571 goto out;
2572 }
2573
David Ahern6c973c92016-11-16 15:06:32 +09002574 thread = timehist_get_thread(sched, sample, machine, evsel);
David Ahern49394a22016-11-16 15:06:29 +09002575 if (thread == NULL) {
2576 rc = -1;
2577 goto out;
2578 }
2579
Namhyung Kima4b2b6f2016-12-08 23:47:53 +09002580 if (timehist_skip_sample(sched, thread, evsel, sample))
David Ahern49394a22016-11-16 15:06:29 +09002581 goto out;
2582
2583 tr = thread__get_runtime(thread);
2584 if (tr == NULL) {
2585 rc = -1;
2586 goto out;
2587 }
2588
Arnaldo Carvalho de Melo3b7313f2020-05-04 13:56:31 -03002589 tprev = evsel__get_time(evsel, sample->cpu);
David Ahern49394a22016-11-16 15:06:29 +09002590
David Ahern853b7402016-11-29 10:15:44 -07002591 /*
2592	 * If a start time was given:
2593	 * - sample time is before the window user cares about - skip sample
2594	 * - tprev is before the window user cares about - reset it to the window start
2595 */
2596 if (ptime->start && ptime->start > t)
2597 goto out;
2598
Namhyung Kimbdd75722016-12-22 15:03:49 +09002599 if (tprev && ptime->start > tprev)
David Ahern853b7402016-11-29 10:15:44 -07002600 tprev = ptime->start;
2601
2602 /*
2603	 * If an end time was given:
2604	 * - previous sched event is out of the window - we are done
2605	 * - sample time is beyond the window user cares about - reset it
2606	 *   to the window end to close out stats for the time window of interest
2607 */
2608 if (ptime->end) {
2609 if (tprev > ptime->end)
2610 goto out;
2611
2612 if (t > ptime->end)
2613 t = ptime->end;
2614 }
2615
Namhyung Kim07235f82016-12-08 23:47:54 +09002616 if (!sched->idle_hist || thread->tid == 0) {
David Aherna74eaf12020-08-17 11:09:42 -06002617 if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2618 timehist_update_runtime_stats(tr, t, tprev);
Namhyung Kim07235f82016-12-08 23:47:54 +09002619
2620 if (sched->idle_hist) {
2621 struct idle_thread_runtime *itr = (void *)tr;
2622 struct thread_runtime *last_tr;
2623
2624 BUG_ON(thread->tid != 0);
2625
2626 if (itr->last_thread == NULL)
2627 goto out;
2628
2629 /* add current idle time as last thread's runtime */
2630 last_tr = thread__get_runtime(itr->last_thread);
2631 if (last_tr == NULL)
2632 goto out;
2633
2634 timehist_update_runtime_stats(last_tr, t, tprev);
2635 /*
2636			 * Reset the last thread's delta times: they are not
2637			 * updated again and would otherwise show stale values
2638			 * next time. Only the total run time and run stats matter.
2639 */
2640 last_tr->dt_run = 0;
Namhyung Kim07235f82016-12-08 23:47:54 +09002641 last_tr->dt_delay = 0;
Namhyung Kim941bdea2017-01-13 19:45:21 +09002642 last_tr->dt_sleep = 0;
2643 last_tr->dt_iowait = 0;
2644 last_tr->dt_preempt = 0;
Namhyung Kim07235f82016-12-08 23:47:54 +09002645
Namhyung Kimba957eb2016-12-08 23:47:55 +09002646 if (itr->cursor.nr)
2647 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2648
Namhyung Kim07235f82016-12-08 23:47:54 +09002649 itr->last_thread = NULL;
2650 }
2651 }
David Ahern853b7402016-11-29 10:15:44 -07002652
David Ahern52df1382016-11-16 15:06:30 +09002653 if (!sched->summary_only)
Brendan Gregg292c4a82017-03-14 01:56:29 +00002654 timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
David Ahern49394a22016-11-16 15:06:29 +09002655
2656out:
Namhyung Kim9396c9c2016-12-22 15:03:50 +09002657 if (sched->hist_time.start == 0 && t >= ptime->start)
2658 sched->hist_time.start = t;
2659 if (ptime->end == 0 || t <= ptime->end)
2660 sched->hist_time.end = t;
2661
David Ahern49394a22016-11-16 15:06:29 +09002662 if (tr) {
2663 /* time of this sched_switch event becomes last time task seen */
2664 tr->last_time = sample->time;
2665
Namhyung Kim941bdea2017-01-13 19:45:21 +09002666 /* last state is used to determine where to account wait time */
Namhyung Kim414e0502017-01-13 19:45:22 +09002667 tr->last_state = state;
Namhyung Kim941bdea2017-01-13 19:45:21 +09002668
David Ahern49394a22016-11-16 15:06:29 +09002669 /* sched out event for task so reset ready to run time */
2670 tr->ready_to_run = 0;
2671 }
2672
Arnaldo Carvalho de Melo3b7313f2020-05-04 13:56:31 -03002673 evsel__save_time(evsel, sample->time, sample->cpu);
David Ahern49394a22016-11-16 15:06:29 +09002674
2675 return rc;
2676}
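
/*
 * Worked example for the window clamping above (illustrative numbers):
 * with --time limiting the window to [100, 200], a switch at t == 250
 * whose previous event was at tprev == 150 accounts only [150, 200]:
 * t is reset to ptime->end. A tprev of 80 would first be pulled up to
 * ptime->start, and a tprev beyond ptime->end skips the sample entirely.
 */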
2677
2678static int timehist_sched_switch_event(struct perf_tool *tool,
2679 union perf_event *event,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002680 struct evsel *evsel,
David Ahern49394a22016-11-16 15:06:29 +09002681 struct perf_sample *sample,
2682 struct machine *machine __maybe_unused)
2683{
2684 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2685}
2686
2687static int process_lost(struct perf_tool *tool __maybe_unused,
2688 union perf_event *event,
2689 struct perf_sample *sample,
2690 struct machine *machine __maybe_unused)
2691{
2692 char tstr[64];
2693
2694 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2695 printf("%15s ", tstr);
Jiri Olsa5290ed62019-08-25 20:17:46 +02002696 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
David Ahern49394a22016-11-16 15:06:29 +09002697
2698 return 0;
2699}
2700
2701
David Ahern52df1382016-11-16 15:06:30 +09002702static void print_thread_runtime(struct thread *t,
2703 struct thread_runtime *r)
2704{
2705 double mean = avg_stats(&r->run_stats);
2706 float stddev;
2707
2708 printf("%*s %5d %9" PRIu64 " ",
2709 comm_width, timehist_get_commstr(t), t->ppid,
2710 (u64) r->run_stats.n);
2711
2712 print_sched_time(r->total_run_time, 8);
2713 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2714 print_sched_time(r->run_stats.min, 6);
2715 printf(" ");
2716 print_sched_time((u64) mean, 6);
2717 printf(" ");
2718 print_sched_time(r->run_stats.max, 6);
2719 printf(" ");
2720 printf("%5.2f", stddev);
David Ahern350f54f2016-11-25 09:28:41 -07002721 printf(" %5" PRIu64, r->migrations);
David Ahern52df1382016-11-16 15:06:30 +09002722 printf("\n");
2723}
2724
Namhyung Kim587782c2017-01-13 19:45:23 +09002725static void print_thread_waittime(struct thread *t,
2726 struct thread_runtime *r)
2727{
2728 printf("%*s %5d %9" PRIu64 " ",
2729 comm_width, timehist_get_commstr(t), t->ppid,
2730 (u64) r->run_stats.n);
2731
2732 print_sched_time(r->total_run_time, 8);
2733 print_sched_time(r->total_sleep_time, 6);
2734 printf(" ");
2735 print_sched_time(r->total_iowait_time, 6);
2736 printf(" ");
2737 print_sched_time(r->total_preempt_time, 6);
2738 printf(" ");
2739 print_sched_time(r->total_delay_time, 6);
2740 printf("\n");
2741}
2742
David Ahern52df1382016-11-16 15:06:30 +09002743struct total_run_stats {
Namhyung Kim587782c2017-01-13 19:45:23 +09002744 struct perf_sched *sched;
David Ahern52df1382016-11-16 15:06:30 +09002745 u64 sched_count;
2746 u64 task_count;
2747 u64 total_run_time;
2748};
2749
2750static int __show_thread_runtime(struct thread *t, void *priv)
2751{
2752 struct total_run_stats *stats = priv;
2753 struct thread_runtime *r;
2754
2755 if (thread__is_filtered(t))
2756 return 0;
2757
2758 r = thread__priv(t);
2759 if (r && r->run_stats.n) {
2760 stats->task_count++;
2761 stats->sched_count += r->run_stats.n;
2762 stats->total_run_time += r->total_run_time;
Namhyung Kim587782c2017-01-13 19:45:23 +09002763
2764 if (stats->sched->show_state)
2765 print_thread_waittime(t, r);
2766 else
2767 print_thread_runtime(t, r);
David Ahern52df1382016-11-16 15:06:30 +09002768 }
2769
2770 return 0;
2771}
2772
2773static int show_thread_runtime(struct thread *t, void *priv)
2774{
2775 if (t->dead)
2776 return 0;
2777
2778 return __show_thread_runtime(t, priv);
2779}
2780
2781static int show_deadthread_runtime(struct thread *t, void *priv)
2782{
2783 if (!t->dead)
2784 return 0;
2785
2786 return __show_thread_runtime(t, priv);
2787}
2788
Namhyung Kimba957eb2016-12-08 23:47:55 +09002789static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2790{
2791 const char *sep = " <- ";
2792 struct callchain_list *chain;
2793 size_t ret = 0;
2794 char bf[1024];
2795 bool first;
2796
2797 if (node == NULL)
2798 return 0;
2799
2800 ret = callchain__fprintf_folded(fp, node->parent);
2801 first = (ret == 0);
2802
2803 list_for_each_entry(chain, &node->val, list) {
2804 if (chain->ip >= PERF_CONTEXT_MAX)
2805 continue;
2806 if (chain->ms.sym && chain->ms.sym->ignore)
2807 continue;
2808 ret += fprintf(fp, "%s%s", first ? "" : sep,
2809 callchain_list__sym_name(chain, bf, sizeof(bf),
2810 false));
2811 first = false;
2812 }
2813
2814 return ret;
2815}
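
/*
 * Folded output joins one chain's frames with " <- ", skipping ignored
 * scheduler symbols and context markers, e.g. (hypothetical frames):
 *
 *	cpuidle_enter <- call_cpuidle <- do_idle <- cpu_startup_entry
 */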
2816
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08002817static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
Namhyung Kimba957eb2016-12-08 23:47:55 +09002818{
2819 size_t ret = 0;
2820 FILE *fp = stdout;
2821 struct callchain_node *chain;
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08002822 struct rb_node *rb_node = rb_first_cached(root);
Namhyung Kimba957eb2016-12-08 23:47:55 +09002823
2824 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
2825 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
2826 graph_dotted_line);
2827
2828 while (rb_node) {
2829 chain = rb_entry(rb_node, struct callchain_node, rb_node);
2830 rb_node = rb_next(rb_node);
2831
2832 ret += fprintf(fp, " ");
2833 print_sched_time(chain->hit, 12);
2834		ret += 16;	/* print_sched_time prints its 2nd arg + 4 chars */
2835 ret += fprintf(fp, " %8d ", chain->count);
2836 ret += callchain__fprintf_folded(fp, chain);
2837 ret += fprintf(fp, "\n");
2838 }
2839
2840 return ret;
2841}
2842
David Ahern52df1382016-11-16 15:06:30 +09002843static void timehist_print_summary(struct perf_sched *sched,
2844 struct perf_session *session)
2845{
2846 struct machine *m = &session->machines.host;
2847 struct total_run_stats totals;
2848 u64 task_count;
2849 struct thread *t;
2850 struct thread_runtime *r;
2851 int i;
Namhyung Kim9396c9c2016-12-22 15:03:50 +09002852 u64 hist_time = sched->hist_time.end - sched->hist_time.start;
David Ahern52df1382016-11-16 15:06:30 +09002853
2854 memset(&totals, 0, sizeof(totals));
Namhyung Kim587782c2017-01-13 19:45:23 +09002855 totals.sched = sched;
David Ahern52df1382016-11-16 15:06:30 +09002856
Namhyung Kim07235f82016-12-08 23:47:54 +09002857 if (sched->idle_hist) {
2858 printf("\nIdle-time summary\n");
2859 printf("%*s parent sched-out ", comm_width, "comm");
2860 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
Namhyung Kim587782c2017-01-13 19:45:23 +09002861 } else if (sched->show_state) {
2862 printf("\nWait-time summary\n");
2863 printf("%*s parent sched-in ", comm_width, "comm");
2864 printf(" run-time sleep iowait preempt delay\n");
Namhyung Kim07235f82016-12-08 23:47:54 +09002865 } else {
2866 printf("\nRuntime summary\n");
2867 printf("%*s parent sched-in ", comm_width, "comm");
2868 printf(" run-time min-run avg-run max-run stddev migrations\n");
2869 }
David Ahern52df1382016-11-16 15:06:30 +09002870 printf("%*s (count) ", comm_width, "");
Namhyung Kim587782c2017-01-13 19:45:23 +09002871 printf(" (msec) (msec) (msec) (msec) %s\n",
2872 sched->show_state ? "(msec)" : "%");
David Ahern350f54f2016-11-25 09:28:41 -07002873 printf("%.117s\n", graph_dotted_line);
David Ahern52df1382016-11-16 15:06:30 +09002874
2875 machine__for_each_thread(m, show_thread_runtime, &totals);
2876 task_count = totals.task_count;
2877 if (!task_count)
2878 printf("<no still running tasks>\n");
2879
2880 printf("\nTerminated tasks:\n");
2881 machine__for_each_thread(m, show_deadthread_runtime, &totals);
2882 if (task_count == totals.task_count)
2883 printf("<no terminated tasks>\n");
2884
2885 /* CPU idle stats not tracked when samples were skipped */
Namhyung Kim07235f82016-12-08 23:47:54 +09002886 if (sched->skipped_samples && !sched->idle_hist)
David Ahern52df1382016-11-16 15:06:30 +09002887 return;
2888
2889 printf("\nIdle stats:\n");
Namhyung Kimb3363522016-12-06 12:40:05 +09002890 for (i = 0; i < idle_max_cpu; ++i) {
David Aherna74eaf12020-08-17 11:09:42 -06002891 if (cpu_list && !test_bit(i, cpu_bitmap))
2892 continue;
2893
David Ahern52df1382016-11-16 15:06:30 +09002894 t = idle_threads[i];
2895 if (!t)
2896 continue;
2897
2898 r = thread__priv(t);
2899 if (r && r->run_stats.n) {
2900 totals.sched_count += r->run_stats.n;
2901 printf(" CPU %2d idle for ", i);
2902 print_sched_time(r->total_run_time, 6);
Namhyung Kim9396c9c2016-12-22 15:03:50 +09002903 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
David Ahern52df1382016-11-16 15:06:30 +09002904 } else
2905 printf(" CPU %2d idle entire time window\n", i);
2906 }
2907
Arnaldo Carvalho de Melo4c50563d2018-05-28 16:07:56 -03002908 if (sched->idle_hist && sched->show_callchain) {
Namhyung Kimba957eb2016-12-08 23:47:55 +09002909 callchain_param.mode = CHAIN_FOLDED;
2910 callchain_param.value = CCVAL_PERIOD;
2911
2912 callchain_register_param(&callchain_param);
2913
2914 printf("\nIdle stats by callchain:\n");
2915 for (i = 0; i < idle_max_cpu; ++i) {
2916 struct idle_thread_runtime *itr;
2917
2918 t = idle_threads[i];
2919 if (!t)
2920 continue;
2921
2922 itr = thread__priv(t);
2923 if (itr == NULL)
2924 continue;
2925
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08002926 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
Namhyung Kimba957eb2016-12-08 23:47:55 +09002927 0, &callchain_param);
2928
2929 printf(" CPU %2d:", i);
2930 print_sched_time(itr->tr.total_run_time, 6);
2931 printf(" msec\n");
2932 timehist_print_idlehist_callchain(&itr->sorted_root);
2933 printf("\n");
2934 }
2935 }
2936
David Ahern52df1382016-11-16 15:06:30 +09002937 printf("\n"
2938 " Total number of unique tasks: %" PRIu64 "\n"
Namhyung Kim9396c9c2016-12-22 15:03:50 +09002939 "Total number of context switches: %" PRIu64 "\n",
David Ahern52df1382016-11-16 15:06:30 +09002940 totals.task_count, totals.sched_count);
2941
Namhyung Kim9396c9c2016-12-22 15:03:50 +09002942 printf(" Total run time (msec): ");
David Ahern52df1382016-11-16 15:06:30 +09002943 print_sched_time(totals.total_run_time, 2);
2944 printf("\n");
Namhyung Kim9396c9c2016-12-22 15:03:50 +09002945
2946 printf(" Total scheduling time (msec): ");
2947 print_sched_time(hist_time, 2);
Ian Rogers6d188042022-01-04 22:13:51 -08002948 printf(" (x %d)\n", sched->max_cpu.cpu);
David Ahern52df1382016-11-16 15:06:30 +09002949}
2950
David Ahern49394a22016-11-16 15:06:29 +09002951typedef int (*sched_handler)(struct perf_tool *tool,
2952 union perf_event *event,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002953 struct evsel *evsel,
David Ahern49394a22016-11-16 15:06:29 +09002954 struct perf_sample *sample,
2955 struct machine *machine);
2956
2957static int perf_timehist__process_sample(struct perf_tool *tool,
2958 union perf_event *event,
2959 struct perf_sample *sample,
Jiri Olsa32dcd022019-07-21 13:23:51 +02002960 struct evsel *evsel,
David Ahern49394a22016-11-16 15:06:29 +09002961 struct machine *machine)
2962{
2963 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2964 int err = 0;
Ian Rogers6d188042022-01-04 22:13:51 -08002965 struct perf_cpu this_cpu = {
2966 .cpu = sample->cpu,
2967 };
David Ahern49394a22016-11-16 15:06:29 +09002968
Ian Rogers6d188042022-01-04 22:13:51 -08002969 if (this_cpu.cpu > sched->max_cpu.cpu)
David Ahern49394a22016-11-16 15:06:29 +09002970 sched->max_cpu = this_cpu;
2971
2972 if (evsel->handler != NULL) {
2973 sched_handler f = evsel->handler;
2974
2975 err = f(tool, event, evsel, sample, machine);
2976 }
2977
2978 return err;
2979}
2980
David Ahern6c973c92016-11-16 15:06:32 +09002981static int timehist_check_attr(struct perf_sched *sched,
Jiri Olsa63503db2019-07-21 13:23:52 +02002982 struct evlist *evlist)
David Ahern6c973c92016-11-16 15:06:32 +09002983{
Jiri Olsa32dcd022019-07-21 13:23:51 +02002984 struct evsel *evsel;
David Ahern6c973c92016-11-16 15:06:32 +09002985 struct evsel_runtime *er;
2986
Jiri Olsace9036a2019-07-21 13:24:23 +02002987 list_for_each_entry(evsel, &evlist->core.entries, core.node) {
Arnaldo Carvalho de Melo3b7313f2020-05-04 13:56:31 -03002988 er = evsel__get_runtime(evsel);
David Ahern6c973c92016-11-16 15:06:32 +09002989 if (er == NULL) {
2990 pr_err("Failed to allocate memory for evsel runtime data\n");
2991 return -1;
2992 }
2993
Arnaldo Carvalho de Melo27de9b22018-05-28 16:00:29 -03002994 if (sched->show_callchain && !evsel__has_callchain(evsel)) {
David Ahern6c973c92016-11-16 15:06:32 +09002995 pr_info("Samples do not have callchains.\n");
2996 sched->show_callchain = 0;
2997 symbol_conf.use_callchain = 0;
2998 }
2999 }
3000
3001 return 0;
3002}
3003
David Ahern49394a22016-11-16 15:06:29 +09003004static int perf_sched__timehist(struct perf_sched *sched)
3005{
David Ahernd566a9c2020-08-07 10:48:44 -06003006 struct evsel_str_handler handlers[] = {
David Ahern49394a22016-11-16 15:06:29 +09003007 { "sched:sched_switch", timehist_sched_switch_event, },
3008 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
David Ahernd566a9c2020-08-07 10:48:44 -06003009 { "sched:sched_waking", timehist_sched_wakeup_event, },
David Ahern49394a22016-11-16 15:06:29 +09003010 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
3011 };
Jiri Olsa32dcd022019-07-21 13:23:51 +02003012 const struct evsel_str_handler migrate_handlers[] = {
David Ahern350f54f2016-11-25 09:28:41 -07003013 { "sched:sched_migrate_task", timehist_migrate_task_event, },
3014 };
Jiri Olsa8ceb41d2017-01-23 22:07:59 +01003015 struct perf_data data = {
Jiri Olsa2d4f2792019-02-21 10:41:30 +01003016 .path = input_name,
3017 .mode = PERF_DATA_MODE_READ,
3018 .force = sched->force,
David Ahern49394a22016-11-16 15:06:29 +09003019 };
3020
3021 struct perf_session *session;
Jiri Olsa63503db2019-07-21 13:23:52 +02003022 struct evlist *evlist;
David Ahern49394a22016-11-16 15:06:29 +09003023 int err = -1;
3024
3025 /*
3026 * event handlers for timehist option
3027 */
3028 sched->tool.sample = perf_timehist__process_sample;
3029 sched->tool.mmap = perf_event__process_mmap;
3030 sched->tool.comm = perf_event__process_comm;
3031 sched->tool.exit = perf_event__process_exit;
3032 sched->tool.fork = perf_event__process_fork;
3033 sched->tool.lost = process_lost;
3034 sched->tool.attr = perf_event__process_attr;
3035 sched->tool.tracing_data = perf_event__process_tracing_data;
3036 sched->tool.build_id = perf_event__process_build_id;
3037
3038 sched->tool.ordered_events = true;
3039 sched->tool.ordering_requires_timestamps = true;
3040
David Ahern6c973c92016-11-16 15:06:32 +09003041 symbol_conf.use_callchain = sched->show_callchain;
3042
Namhyung Kim2681bd82021-07-19 15:31:49 -07003043 session = perf_session__new(&data, &sched->tool);
Mamatha Inamdar6ef81c52019-08-22 12:50:49 +05303044 if (IS_ERR(session))
3045 return PTR_ERR(session);
David Ahern49394a22016-11-16 15:06:29 +09003046
David Ahernc30d6302019-12-04 10:39:25 -07003047 if (cpu_list) {
3048 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3049 if (err < 0)
3050 goto out;
3051 }
3052
David Ahern52df1382016-11-16 15:06:30 +09003053 evlist = session->evlist;
3054
David Ahern49394a22016-11-16 15:06:29 +09003055 symbol__init(&session->header.env);
3056
David Ahern853b7402016-11-29 10:15:44 -07003057 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3058 pr_err("Invalid time string\n");
3059		err = -EINVAL;
		goto out;
3060 }
3061
David Ahern6c973c92016-11-16 15:06:32 +09003062 if (timehist_check_attr(sched, evlist) != 0)
3063 goto out;
3064
David Ahern49394a22016-11-16 15:06:29 +09003065 setup_pager();
3066
David Ahernd566a9c2020-08-07 10:48:44 -06003067 /* prefer sched_waking if it is captured */
Arnaldo Carvalho de Melob02736f2020-11-30 09:48:07 -03003068 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
David Ahernd566a9c2020-08-07 10:48:44 -06003069 handlers[1].handler = timehist_sched_wakeup_ignore;
3070
David Ahern49394a22016-11-16 15:06:29 +09003071 /* setup per-evsel handlers */
3072 if (perf_session__set_tracepoints_handlers(session, handlers))
3073 goto out;
3074
David Ahernf45bf8d2016-11-29 13:39:48 -07003075 /* sched_switch event at a minimum needs to exist */
Arnaldo Carvalho de Melob02736f2020-11-30 09:48:07 -03003076 if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
David Ahernf45bf8d2016-11-29 13:39:48 -07003077 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
David Ahern49394a22016-11-16 15:06:29 +09003078 goto out;
David Ahernf45bf8d2016-11-29 13:39:48 -07003079 }
David Ahern49394a22016-11-16 15:06:29 +09003080
David Ahern350f54f2016-11-25 09:28:41 -07003081 if (sched->show_migrations &&
3082 perf_session__set_tracepoints_handlers(session, migrate_handlers))
3083 goto out;
3084
David Ahern49394a22016-11-16 15:06:29 +09003085 /* pre-allocate struct for per-CPU idle stats */
Ian Rogers6d188042022-01-04 22:13:51 -08003086 sched->max_cpu.cpu = session->header.env.nr_cpus_online;
3087 if (sched->max_cpu.cpu == 0)
3088 sched->max_cpu.cpu = 4;
3089 if (init_idle_threads(sched->max_cpu.cpu))
David Ahern49394a22016-11-16 15:06:29 +09003090 goto out;
3091
David Ahern52df1382016-11-16 15:06:30 +09003092 /* summary_only implies summary option, but don't overwrite summary if set */
3093 if (sched->summary_only)
3094 sched->summary = sched->summary_only;
3095
3096 if (!sched->summary_only)
David Aherna407b062016-11-16 15:06:33 +09003097 timehist_header(sched);
David Ahern49394a22016-11-16 15:06:29 +09003098
3099 err = perf_session__process_events(session);
3100 if (err) {
3101		pr_err("Failed to process events, error %d\n", err);
3102 goto out;
3103 }
3104
David Ahern52df1382016-11-16 15:06:30 +09003105 sched->nr_events = evlist->stats.nr_events[0];
3106 sched->nr_lost_events = evlist->stats.total_lost;
3107 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3108
3109 if (sched->summary)
3110 timehist_print_summary(sched, session);
3111
David Ahern49394a22016-11-16 15:06:29 +09003112out:
3113 free_idle_threads();
3114 perf_session__delete(session);
3115
3116 return err;
3117}
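
/*
 * Usage sketch: record first, then analyze, e.g.
 *
 *	perf sched record -- sleep 1
 *	perf sched timehist --summary
 *
 * Flags such as -V/--cpu-visual, -M/--migrations or -I/--idle-hist select
 * the extra handlers and output paths wired up above.
 */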
3118
3119
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003120static void print_bad_events(struct perf_sched *sched)
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003121{
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003122 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003123 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003124 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3125 sched->nr_unordered_timestamps, sched->nr_timestamps);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003126 }
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003127 if (sched->nr_lost_events && sched->nr_events) {
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003128 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003129 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3130 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003131 }
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003132 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003133 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003134 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3135 sched->nr_context_switch_bugs, sched->nr_timestamps);
3136 if (sched->nr_lost_events)
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003137 printf(" (due to lost events?)");
3138 printf("\n");
3139 }
3140}
3141
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08003142static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
Josef Bacik2f80dd42015-05-22 09:18:40 -04003143{
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08003144 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
Josef Bacik2f80dd42015-05-22 09:18:40 -04003145 struct work_atoms *this;
3146 const char *comm = thread__comm_str(data->thread), *this_comm;
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08003147 bool leftmost = true;
Josef Bacik2f80dd42015-05-22 09:18:40 -04003148
3149 while (*new) {
3150 int cmp;
3151
3152 this = container_of(*new, struct work_atoms, node);
3153 parent = *new;
3154
3155 this_comm = thread__comm_str(this->thread);
3156 cmp = strcmp(comm, this_comm);
3157 if (cmp > 0) {
3158 new = &((*new)->rb_left);
3159 } else if (cmp < 0) {
3160 new = &((*new)->rb_right);
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08003161 leftmost = false;
Josef Bacik2f80dd42015-05-22 09:18:40 -04003162 } else {
3163 this->num_merged++;
3164 this->total_runtime += data->total_runtime;
3165 this->nb_atoms += data->nb_atoms;
3166 this->total_lat += data->total_lat;
3167 list_splice(&data->work_list, &this->work_list);
3168 if (this->max_lat < data->max_lat) {
3169 this->max_lat = data->max_lat;
Joel Fernandes (Google)dc000c42020-09-25 19:56:34 -04003170 this->max_lat_start = data->max_lat_start;
3171 this->max_lat_end = data->max_lat_end;
Josef Bacik2f80dd42015-05-22 09:18:40 -04003172 }
3173 zfree(&data);
3174 return;
3175 }
3176 }
3177
3178 data->num_merged++;
3179 rb_link_node(&data->node, parent, new);
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08003180 rb_insert_color_cached(&data->node, root, leftmost);
Josef Bacik2f80dd42015-05-22 09:18:40 -04003181}
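
/*
 * Merging is keyed on the thread's comm, so e.g. two "gcc" threads with
 * different pids collapse into a single row (hypothetical example); pass
 * -p/--pids to "perf sched latency" to keep per-pid statistics instead.
 */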
3182
3183static void perf_sched__merge_lat(struct perf_sched *sched)
3184{
3185 struct work_atoms *data;
3186 struct rb_node *node;
3187
3188 if (sched->skip_merge)
3189 return;
3190
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08003191 while ((node = rb_first_cached(&sched->atom_root))) {
3192 rb_erase_cached(node, &sched->atom_root);
Josef Bacik2f80dd42015-05-22 09:18:40 -04003193 data = rb_entry(node, struct work_atoms, node);
3194 __merge_work_atoms(&sched->merged_atom_root, data);
3195 }
3196}
3197
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003198static int perf_sched__lat(struct perf_sched *sched)
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003199{
3200 struct rb_node *next;
3201
3202 setup_pager();
David Ahernad9def72013-08-07 22:50:44 -04003203
Arnaldo Carvalho de Meloae536ac2015-03-02 22:28:41 -03003204 if (perf_sched__read_events(sched))
Arnaldo Carvalho de Meloa116e052012-09-08 22:53:06 -03003205 return -1;
David Ahernad9def72013-08-07 22:50:44 -04003206
Josef Bacik2f80dd42015-05-22 09:18:40 -04003207 perf_sched__merge_lat(sched);
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003208 perf_sched__sort_lat(sched);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003209
Joel Fernandes (Google)dc000c42020-09-25 19:56:34 -04003210 printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3211 printf(" Task | Runtime ms | Switches | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
3212 printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003213
Davidlohr Buesocb4c13a2018-12-06 11:18:19 -08003214 next = rb_first_cached(&sched->sorted_atom_root);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003215
3216 while (next) {
3217 struct work_atoms *work_list;
3218
3219 work_list = rb_entry(next, struct work_atoms, node);
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003220 output_lat_thread(sched, work_list);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003221 next = rb_next(next);
Arnaldo Carvalho de Meloae536ac2015-03-02 22:28:41 -03003222 thread__zput(work_list->thread);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003223 }
3224
Ramkumar Ramachandra80790e02014-03-17 10:18:21 -04003225 printf(" -----------------------------------------------------------------------------------------------------------------\n");
Arnaldo Carvalho de Melo9486aa32011-01-22 20:37:02 -02003226 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
Arnaldo Carvalho de Melo4fc76e42016-08-08 12:23:49 -03003227 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003228
3229 printf(" ---------------------------------------------------\n");
3230
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003231 print_bad_events(sched);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003232 printf("\n");
3233
Arnaldo Carvalho de Meloa116e052012-09-08 22:53:06 -03003234 return 0;
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003235}
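
/*
 * Each work_atoms entry becomes one row of the table above, roughly
 * (illustrative values only):
 *
 *	gcc:8269 | 12.345 ms | 100 | avg: 0.123 ms | max: 1.234 ms | ...
 */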
3236
Jiri Olsa99623c62016-04-12 15:29:26 +02003237static int setup_map_cpus(struct perf_sched *sched)
3238{
Jiri Olsaf8548392019-07-21 13:23:49 +02003239 struct perf_cpu_map *map;
Jiri Olsa73643bb2016-04-12 15:29:31 +02003240
Ian Rogers6d188042022-01-04 22:13:51 -08003241 sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
Jiri Olsa99623c62016-04-12 15:29:26 +02003242
3243 if (sched->map.comp) {
Ian Rogers6d188042022-01-04 22:13:51 -08003244 sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
Jiri Olsacf294f22016-04-12 15:29:30 +02003245 if (!sched->map.comp_cpus)
3246 return -1;
Jiri Olsa99623c62016-04-12 15:29:26 +02003247 }
3248
Jiri Olsa73643bb2016-04-12 15:29:31 +02003249 if (!sched->map.cpus_str)
3250 return 0;
3251
Jiri Olsa9c3516d2019-07-21 13:24:30 +02003252 map = perf_cpu_map__new(sched->map.cpus_str);
Jiri Olsa73643bb2016-04-12 15:29:31 +02003253 if (!map) {
3254 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3255 return -1;
3256 }
3257
3258 sched->map.cpus = map;
Jiri Olsa99623c62016-04-12 15:29:26 +02003259 return 0;
3260}
3261
Jiri Olsaa151a372016-04-12 15:29:29 +02003262static int setup_color_pids(struct perf_sched *sched)
3263{
Jiri Olsa9749b902019-07-21 13:23:50 +02003264 struct perf_thread_map *map;
Jiri Olsaa151a372016-04-12 15:29:29 +02003265
3266 if (!sched->map.color_pids_str)
3267 return 0;
3268
3269 map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3270 if (!map) {
3271 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3272 return -1;
3273 }
3274
3275 sched->map.color_pids = map;
3276 return 0;
3277}
3278
Jiri Olsacf294f22016-04-12 15:29:30 +02003279static int setup_color_cpus(struct perf_sched *sched)
3280{
Jiri Olsaf8548392019-07-21 13:23:49 +02003281 struct perf_cpu_map *map;
Jiri Olsacf294f22016-04-12 15:29:30 +02003282
3283 if (!sched->map.color_cpus_str)
3284 return 0;
3285
Jiri Olsa9c3516d2019-07-21 13:24:30 +02003286 map = perf_cpu_map__new(sched->map.color_cpus_str);
Jiri Olsacf294f22016-04-12 15:29:30 +02003287 if (!map) {
3288 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
3289 return -1;
3290 }
3291
3292 sched->map.color_cpus = map;
3293 return 0;
3294}
3295
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003296static int perf_sched__map(struct perf_sched *sched)
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003297{
Jiri Olsa99623c62016-04-12 15:29:26 +02003298 if (setup_map_cpus(sched))
3299 return -1;
Ingo Molnar40749d02009-09-17 18:24:55 +02003300
Jiri Olsaa151a372016-04-12 15:29:29 +02003301 if (setup_color_pids(sched))
3302 return -1;
3303
Jiri Olsacf294f22016-04-12 15:29:30 +02003304 if (setup_color_cpus(sched))
3305 return -1;
3306
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003307 setup_pager();
Arnaldo Carvalho de Meloae536ac2015-03-02 22:28:41 -03003308 if (perf_sched__read_events(sched))
Arnaldo Carvalho de Meloa116e052012-09-08 22:53:06 -03003309 return -1;
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003310 print_bad_events(sched);
Arnaldo Carvalho de Meloa116e052012-09-08 22:53:06 -03003311 return 0;
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003312}
3313
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003314static int perf_sched__replay(struct perf_sched *sched)
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003315{
3316 unsigned long i;
3317
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003318 calibrate_run_measurement_overhead(sched);
3319 calibrate_sleep_measurement_overhead(sched);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003320
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003321 test_calibrations(sched);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003322
Arnaldo Carvalho de Meloae536ac2015-03-02 22:28:41 -03003323 if (perf_sched__read_events(sched))
Arnaldo Carvalho de Meloa116e052012-09-08 22:53:06 -03003324 return -1;
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003325
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003326 printf("nr_run_events: %ld\n", sched->nr_run_events);
3327 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
3328 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003329
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003330 if (sched->targetless_wakeups)
3331 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
3332 if (sched->multitarget_wakeups)
3333 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3334 if (sched->nr_run_events_optimized)
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003335 printf("run atoms optimized: %ld\n",
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003336 sched->nr_run_events_optimized);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003337
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003338 print_task_traces(sched);
3339 add_cross_task_wakeups(sched);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003340
Ian Rogers59c26662022-08-26 09:42:40 -07003341 sched->thread_funcs_exit = false;
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003342 create_tasks(sched);
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003343 printf("------------------------------------------------------------\n");
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003344 for (i = 0; i < sched->replay_repeat; i++)
3345 run_one_test(sched);
Arnaldo Carvalho de Meloa116e052012-09-08 22:53:06 -03003346
Ian Rogers59c26662022-08-26 09:42:40 -07003347 sched->thread_funcs_exit = true;
Namhyung Kim165da802022-09-08 15:54:48 -07003348 destroy_tasks(sched);
Arnaldo Carvalho de Meloa116e052012-09-08 22:53:06 -03003349 return 0;
Ingo Molnar0ec04e12009-09-16 17:40:48 +02003350}
3351
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003352static void setup_sorting(struct perf_sched *sched, const struct option *options,
3353 const char * const usage_msg[])
Frederic Weisbeckerdaa1d7a2009-09-13 03:36:29 +02003354{
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003355 char *tmp, *tok, *str = strdup(sched->sort_order);
Frederic Weisbeckerdaa1d7a2009-09-13 03:36:29 +02003356
3357 for (tok = strtok_r(str, ", ", &tmp);
3358 tok; tok = strtok_r(NULL, ", ", &tmp)) {
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003359 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
Namhyung Kimc7118362015-10-25 00:49:27 +09003360 usage_with_options_msg(usage_msg, options,
3361 "Unknown --sort key: `%s'", tok);
Frederic Weisbeckerdaa1d7a2009-09-13 03:36:29 +02003362 }
3363 }
3364
3365 free(str);
3366
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003367 sort_dimension__add("pid", &sched->cmp_pid);
Frederic Weisbeckerdaa1d7a2009-09-13 03:36:29 +02003368}
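
/*
 * Usage sketch: "perf sched latency --sort max,switch" orders the output
 * by maximum delay, then by switch count. Independent of --sort, the
 * "pid" dimension is always added to cmp_pid, the key list used to match
 * work atoms to threads.
 */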
3369
Yang Jihongb0f00852021-07-13 19:23:58 +08003370static bool schedstat_events_exposed(void)
3371{
3372 /*
3373 * Select "sched:sched_stat_wait" event to check
3374 * whether schedstat tracepoints are exposed.
3375 */
3376 return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
3377 false : true;
3378}
3379
Ingo Molnar1fc35b22009-09-13 09:44:29 +02003380static int __cmd_record(int argc, const char **argv)
3381{
3382 unsigned int rec_argc, i, j;
Ian Rogersd72e5cf2022-08-24 07:57:33 -07003383 char **rec_argv;
3384 const char **rec_argv_copy;
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003385 const char * const record_args[] = {
3386 "record",
3387 "-a",
3388 "-R",
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003389 "-m", "1024",
3390 "-c", "1",
3391 "-e", "sched:sched_switch",
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003392 "-e", "sched:sched_stat_runtime",
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003393 "-e", "sched:sched_process_fork",
Dongsheng7fff9592014-05-05 16:05:53 +09003394 "-e", "sched:sched_wakeup_new",
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003395 "-e", "sched:sched_migrate_task",
3396 };
Yang Jihongb0f00852021-07-13 19:23:58 +08003397
3398 /*
3399 * The tracepoints trace_sched_stat_{wait, sleep, iowait}
3400	 * are not exposed to userspace if CONFIG_SCHEDSTATS is not set.
3401	 * To keep "perf sched record" from failing, determine whether the
3402	 * schedstat events are actually exposed and record them only if so.
3403 */
3404 const char * const schedstat_args[] = {
3405 "-e", "sched:sched_stat_wait",
3406 "-e", "sched:sched_stat_sleep",
3407 "-e", "sched:sched_stat_iowait",
3408 };
3409 unsigned int schedstat_argc = schedstat_events_exposed() ?
3410 ARRAY_SIZE(schedstat_args) : 0;
3411
David Ahernd566a9c2020-08-07 10:48:44 -06003412 struct tep_event *waking_event;
Ian Rogersd72e5cf2022-08-24 07:57:33 -07003413 int ret;
Ingo Molnar1fc35b22009-09-13 09:44:29 +02003414
David Ahernd566a9c2020-08-07 10:48:44 -06003415 /*
3416 * +2 for either "-e", "sched:sched_wakeup" or
3417 * "-e", "sched:sched_waking"
3418 */
Yang Jihongb0f00852021-07-13 19:23:58 +08003419 rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
Ingo Molnar1fc35b22009-09-13 09:44:29 +02003420 rec_argv = calloc(rec_argc + 1, sizeof(char *));
Arnaldo Carvalho de Meloe462dc52011-01-10 10:48:47 -02003421 if (rec_argv == NULL)
Chris Samuelce47dc52010-11-13 13:35:06 +11003422 return -ENOMEM;
Ian Rogersd72e5cf2022-08-24 07:57:33 -07003423 rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
3424 if (rec_argv_copy == NULL) {
3425 free(rec_argv);
3426 return -ENOMEM;
3427 }
Chris Samuelce47dc52010-11-13 13:35:06 +11003428
Ingo Molnar1fc35b22009-09-13 09:44:29 +02003429 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3430 rec_argv[i] = strdup(record_args[i]);
3431
Ian Rogersd72e5cf2022-08-24 07:57:33 -07003432 rec_argv[i++] = strdup("-e");
David Ahernd566a9c2020-08-07 10:48:44 -06003433 waking_event = trace_event__tp_format("sched", "sched_waking");
3434 if (!IS_ERR(waking_event))
3435 rec_argv[i++] = strdup("sched:sched_waking");
3436 else
3437 rec_argv[i++] = strdup("sched:sched_wakeup");
3438
Yang Jihongb0f00852021-07-13 19:23:58 +08003439 for (j = 0; j < schedstat_argc; j++)
3440 rec_argv[i++] = strdup(schedstat_args[j]);
3441
Ingo Molnar1fc35b22009-09-13 09:44:29 +02003442 for (j = 1; j < (unsigned int)argc; j++, i++)
Ian Rogersd72e5cf2022-08-24 07:57:33 -07003443 rec_argv[i] = strdup(argv[j]);
Ingo Molnar1fc35b22009-09-13 09:44:29 +02003444
3445 BUG_ON(i != rec_argc);
3446
Ian Rogersd72e5cf2022-08-24 07:57:33 -07003447 memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
3448 ret = cmd_record(rec_argc, rec_argv_copy);
3449
3450 for (i = 0; i < rec_argc; i++)
3451 free(rec_argv[i]);
3452 free(rec_argv);
3453 free(rec_argv_copy);
3454
3455 return ret;
Ingo Molnar1fc35b22009-09-13 09:44:29 +02003456}
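
/*
 * Sketch of the synthesized command line, assuming sched_waking and the
 * schedstat tracepoints are all exposed:
 *
 *	perf record -a -R -m 1024 -c 1 \
 *		-e sched:sched_switch -e sched:sched_stat_runtime \
 *		-e sched:sched_process_fork -e sched:sched_wakeup_new \
 *		-e sched:sched_migrate_task -e sched:sched_waking \
 *		-e sched:sched_stat_wait -e sched:sched_stat_sleep \
 *		-e sched:sched_stat_iowait <user args>
 *
 * Older kernels fall back to sched:sched_wakeup instead of sched_waking.
 */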
3457
Arnaldo Carvalho de Melob0ad8ea2017-03-27 11:47:20 -03003458int cmd_sched(int argc, const char **argv)
Ingo Molnar0a02ad92009-09-11 12:12:54 +02003459{
Rasmus Villemoes49b8e2b2018-11-03 00:06:23 +01003460 static const char default_sort_order[] = "avg, max, switch, runtime";
Adrian Hunter8a39df82013-10-22 10:34:15 +03003461 struct perf_sched sched = {
3462 .tool = {
3463 .sample = perf_sched__process_tracepoint_sample,
Changbin Du99a3c3a2018-03-06 11:37:37 +08003464 .comm = perf_sched__process_comm,
Hari Bathinif3b36142017-03-08 02:11:43 +05303465 .namespaces = perf_event__process_namespaces,
Adrian Hunter8a39df82013-10-22 10:34:15 +03003466 .lost = perf_event__process_lost,
3467 .fork = perf_sched__process_fork_event,
Jiri Olsa0a8cb852014-07-06 14:18:21 +02003468 .ordered_events = true,
Adrian Hunter8a39df82013-10-22 10:34:15 +03003469 },
3470 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
3471 .sort_list = LIST_HEAD_INIT(sched.sort_list),
Adrian Hunter8a39df82013-10-22 10:34:15 +03003472 .sort_order = default_sort_order,
3473 .replay_repeat = 10,
3474 .profile_cpu = -1,
3475 .next_shortname1 = 'A',
3476 .next_shortname2 = '0',
Josef Bacik2f80dd42015-05-22 09:18:40 -04003477 .skip_merge = 0,
David Ahern6c973c92016-11-16 15:06:32 +09003478 .show_callchain = 1,
3479 .max_stack = 5,
Adrian Hunter8a39df82013-10-22 10:34:15 +03003480 };
Namhyung Kim77f02f42016-10-24 12:00:03 +09003481 const struct option sched_options[] = {
3482 OPT_STRING('i', "input", &input_name, "file",
3483 "input file name"),
3484 OPT_INCR('v', "verbose", &verbose,
3485 "be more verbose (show symbol address, etc)"),
3486 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3487 "dump raw trace in ASCII"),
Namhyung Kim6fa94252016-12-06 12:40:01 +09003488 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
Namhyung Kim77f02f42016-10-24 12:00:03 +09003489 OPT_END()
3490 };
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003491 const struct option latency_options[] = {
3492 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3493 "sort by key(s): runtime, switch, avg, max"),
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003494 OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3495 "CPU to profile on"),
Josef Bacik2f80dd42015-05-22 09:18:40 -04003496 OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3497 "latency stats per pid instead of per comm"),
Namhyung Kim77f02f42016-10-24 12:00:03 +09003498 OPT_PARENT(sched_options)
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003499 };
3500 const struct option replay_options[] = {
3501 OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3502 "repeat the workload replay N times (-1: infinite)"),
Namhyung Kim77f02f42016-10-24 12:00:03 +09003503 OPT_PARENT(sched_options)
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003504 };
Jiri Olsa99623c62016-04-12 15:29:26 +02003505 const struct option map_options[] = {
3506 OPT_BOOLEAN(0, "compact", &sched.map.comp,
3507 "map output in compact mode"),
Jiri Olsaa151a372016-04-12 15:29:29 +02003508 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3509 "highlight given pids in map"),
Jiri Olsacf294f22016-04-12 15:29:30 +02003510 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3511 "highlight given CPUs in map"),
Jiri Olsa73643bb2016-04-12 15:29:31 +02003512 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3513 "display given CPUs in map"),
Namhyung Kim77f02f42016-10-24 12:00:03 +09003514 OPT_PARENT(sched_options)
Jiri Olsa99623c62016-04-12 15:29:26 +02003515 };
David Ahern49394a22016-11-16 15:06:29 +09003516 const struct option timehist_options[] = {
3517 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3518 "file", "vmlinux pathname"),
3519 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3520 "file", "kallsyms pathname"),
David Ahern6c973c92016-11-16 15:06:32 +09003521 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3522 "Display call chains if present (default on)"),
3523 OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3524 "Maximum number of functions to display backtrace."),
David Ahern49394a22016-11-16 15:06:29 +09003525 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3526 "Look for files with symbols relative to this directory"),
David Ahern52df1382016-11-16 15:06:30 +09003527 OPT_BOOLEAN('s', "summary", &sched.summary_only,
3528 "Show only syscall summary with statistics"),
3529 OPT_BOOLEAN('S', "with-summary", &sched.summary,
3530 "Show all syscalls and summary with statistics"),
David Ahernfc1469f2016-11-16 15:06:31 +09003531 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
Brendan Gregg292c4a82017-03-14 01:56:29 +00003532 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
David Ahern350f54f2016-11-25 09:28:41 -07003533 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
David Aherna407b062016-11-16 15:06:33 +09003534 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
Namhyung Kim07235f82016-12-08 23:47:54 +09003535 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
David Ahern853b7402016-11-29 10:15:44 -07003536 OPT_STRING(0, "time", &sched.time_str, "str",
3537 "Time span for analysis (start,stop)"),
Namhyung Kim414e0502017-01-13 19:45:22 +09003538 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
David Ahern0f59d7a2017-09-01 10:49:12 -07003539 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3540 "analyze events only for given process id(s)"),
3541 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3542 "analyze events only for given thread id(s)"),
David Ahernc30d6302019-12-04 10:39:25 -07003543 OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
David Ahern49394a22016-11-16 15:06:29 +09003544 OPT_PARENT(sched_options)
3545 };
3546
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003547 const char * const latency_usage[] = {
3548 "perf sched latency [<options>]",
3549 NULL
3550 };
3551 const char * const replay_usage[] = {
3552 "perf sched replay [<options>]",
3553 NULL
3554 };
Jiri Olsa99623c62016-04-12 15:29:26 +02003555 const char * const map_usage[] = {
3556 "perf sched map [<options>]",
3557 NULL
3558 };
David Ahern49394a22016-11-16 15:06:29 +09003559 const char * const timehist_usage[] = {
3560 "perf sched timehist [<options>]",
3561 NULL
3562 };
Ramkumar Ramachandraa83edb22014-03-14 23:17:54 -04003563 const char *const sched_subcommands[] = { "record", "latency", "map",
David Ahern49394a22016-11-16 15:06:29 +09003564 "replay", "script",
3565 "timehist", NULL };
Ramkumar Ramachandraa83edb22014-03-14 23:17:54 -04003566 const char *sched_usage[] = {
3567 NULL,
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003568 NULL
3569 };
3570 struct trace_sched_handler lat_ops = {
3571 .wakeup_event = latency_wakeup_event,
3572 .switch_event = latency_switch_event,
3573 .runtime_event = latency_runtime_event,
Arnaldo Carvalho de Melo0e9b07e2012-09-11 17:29:27 -03003574 .migrate_task_event = latency_migrate_task_event,
3575 };
3576 struct trace_sched_handler map_ops = {
3577 .switch_event = map_switch_event,
3578 };
3579 struct trace_sched_handler replay_ops = {
3580 .wakeup_event = replay_wakeup_event,
3581 .switch_event = replay_switch_event,
3582 .fork_event = replay_fork_event,
3583 };
	unsigned int i;
	int ret = 0;

	mutex_init(&sched.start_work_mutex);
	mutex_init(&sched.work_done_wait_mutex);
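	/*
	 * These mutexes let 'perf sched replay' release its worker threads
	 * in lockstep and wait for them to finish.  They are initialized
	 * unconditionally so the out: path below can destroy them no matter
	 * which subcommand actually ran.
	 */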
	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
		sched.curr_pid[i] = -1;
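	/* -1 is a sentinel: no task has been observed on that CPU yet. */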

	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);
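	/*
	 * PARSE_OPT_STOP_AT_NON_OPTION made the parser stop at the
	 * subcommand name, so it now sits in argv[0].  The prefix checks
	 * below ("strlen(argv[0]) > 2 && strstarts(...)") accept any
	 * abbreviation of at least three characters, e.g. (illustrative):
	 *
	 *   perf sched rec -- sleep 1    # same as 'perf sched record'
	 *   perf sched lat -s max        # same as 'perf sched latency'
	 */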

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script")) {
		ret = cmd_script(argc, argv);
	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		ret = __cmd_record(argc, argv);
	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		ret = perf_sched__lat(&sched);
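		/*
		 * Illustrative use, assuming a prior 'perf sched record':
		 *   perf sched latency --sort max    # sort by max latency
		 */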
	} else if (!strcmp(argv[0], "map")) {
		if (argc) {
			argc = parse_options(argc, argv, map_options, map_usage, 0);
			if (argc)
				usage_with_options(map_usage, map_options);
		}
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		ret = perf_sched__map(&sched);
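		/*
		 * Note that 'map' passes latency_options to setup_sorting()
		 * above: it defines no sort keys of its own, so the latency
		 * defaults are reused.
		 */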
	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		ret = perf_sched__replay(&sched);
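		/*
		 * 'replay' spawns one thread per recorded task and re-executes
		 * the captured scheduling pattern, e.g. (illustrative):
		 *   perf sched replay -r 10    # repeat the replay ten times
		 */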
	} else if (!strcmp(argv[0], "timehist")) {
		if (argc) {
			argc = parse_options(argc, argv, timehist_options,
					     timehist_usage, 0);
			if (argc)
				usage_with_options(timehist_usage, timehist_options);
		}
		if ((sched.show_wakeups || sched.show_next) &&
		    sched.summary_only) {
			pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
			parse_options_usage(timehist_usage, timehist_options, "s", true);
			if (sched.show_wakeups)
				parse_options_usage(NULL, timehist_options, "w", true);
			if (sched.show_next)
				parse_options_usage(NULL, timehist_options, "n", true);
			ret = -EINVAL;
			goto out;
		}
		ret = symbol__validate_sym_arguments();
		if (ret)
			goto out;

		ret = perf_sched__timehist(&sched);
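		/*
		 * Illustrative use (-s excludes the per-event -w/-n output,
		 * as checked above):
		 *   perf sched timehist -w --state   # per-event detail
		 *   perf sched timehist -s           # summary only
		 */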
	} else {
		usage_with_options(sched_usage, sched_options);
	}

out:
	mutex_destroy(&sched.start_work_mutex);
	mutex_destroy(&sched.work_done_wait_mutex);

	return ret;
}