Merge tag 'trace-v4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
- Fix a bug caused by not cleaning up the new instance-unique triggers
  when deleting an instance. A selftest that triggers the bug is also
  added.
- Fix the delayed kprobe optimization work running after the kprobes
  boot-up self tests, when the probes it references have already been
  removed by the freeing of init memory.
- Add a comment to kprobes explaining why the delayed optimization is
  not a problem for module removal, to save other developers from
  puzzling over it.
- Fix another case of RCU not watching in stack trace tracing.
* tag 'trace-v4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Make sure RCU is watching before calling a stack trace
kprobes: Document how optimized kprobes are removed from module unload
selftests/ftrace: Add test to remove instance with active event triggers
selftests/ftrace: Fix bashisms
ftrace: Remove #ifdef from code and add clear_ftrace_function_probes() stub
ftrace/instances: Clear function triggers when removing instances
ftrace: Simplify glob handling in unregister_ftrace_function_probe_func()
tracing/kprobes: Enforce kprobes teardown after testing
tracing: Move postpone selftests to core from early_initcall
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 30f90c1..541df0b 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -349,6 +349,9 @@
int write, void __user *buffer,
size_t *length, loff_t *ppos);
#endif
+extern void wait_for_kprobe_optimizer(void);
+#else
+static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
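
The kprobes.h hunk follows the usual kernel idiom: a real extern declaration when CONFIG_OPTPROBES is set, and an empty static-inline stub otherwise, so callers such as trace_kprobe.c never need an #ifdef of their own. A minimal compilable sketch of that idiom (CONFIG_OPTPROBES here is just an ordinary macro toggled with -D at build time, not real Kconfig plumbing; build without it, or link a real definition when it is set):

    #include <stdio.h>

    #ifdef CONFIG_OPTPROBES
    extern void wait_for_kprobe_optimizer(void);    /* real version lives elsewhere */
    #else
    static inline void wait_for_kprobe_optimizer(void) { }    /* no-op stub */
    #endif

    int main(void)
    {
            wait_for_kprobe_optimizer();    /* compiles either way, no #ifdef here */
            printf("caller needs no #ifdef\n");
            return 0;
    }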
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7367e0e..2d2d3a5 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -595,7 +595,7 @@
}
/* Wait for completion of optimization and unoptimization */
-static void wait_for_kprobe_optimizer(void)
+void wait_for_kprobe_optimizer(void)
{
mutex_lock(&kprobe_mutex);
@@ -2183,6 +2183,12 @@
* The vaddr this probe is installed at will soon
* be vfreed but not synced to disk. Hence,
* disarming the breakpoint isn't needed.
+ *
+ * Note, this will also move any optimized probes
+ * that are pending removal from their
+ * corresponding lists to the freeing_list, so
+ * they will not be touched by the delayed
+ * kprobe_optimizer work handler.
*/
kill_kprobe(p);
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 39dca4e..74fdfe9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4144,9 +4144,9 @@
int i, ret = -ENODEV;
int size;
- if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
+ if (!glob || !strlen(glob) || !strcmp(glob, "*"))
func_g.search = NULL;
- else if (glob) {
+ else {
int not;
func_g.type = filter_parse_regex(glob, strlen(glob),
@@ -4256,6 +4256,14 @@
return ret;
}
+void clear_ftrace_function_probes(struct trace_array *tr)
+{
+ struct ftrace_func_probe *probe, *n;
+
+ list_for_each_entry_safe(probe, n, &tr->func_probes, list)
+ unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
+}
+
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);
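
The glob rewrite above is more than cleanup: under the old condition a NULL glob matched neither branch, while the new condition treats NULL the same as "" and "*", i.e. "remove everything". That is exactly what the new clear_ftrace_function_probes() relies on when it passes a NULL glob. A runnable userspace comparison of the two checks (old_match_all() and new_match_all() are made-up names for illustration):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Old condition: only a non-NULL glob could select "match all". */
    static bool old_match_all(const char *glob)
    {
            return glob && (strcmp(glob, "*") == 0 || !strlen(glob));
    }

    /* New condition: NULL, "" and "*" all mean "match all". */
    static bool new_match_all(const char *glob)
    {
            return !glob || !strlen(glob) || !strcmp(glob, "*");
    }

    int main(void)
    {
            const char *cases[] = { NULL, "", "*", "sched*" };

            for (int i = 0; i < 4; i++) {
                    const char *g = cases[i];
                    printf("glob=%-6s old=%d new=%d\n",
                           g ? g : "NULL", old_match_all(g), new_match_all(g));
            }
            return 0;    /* only the NULL case differs: old=0, new=1 */
    }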
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c4536c4..1122f15 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1558,7 +1558,7 @@
return 0;
}
-early_initcall(init_trace_selftests);
+core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
@@ -2568,7 +2568,36 @@
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc)
{
- __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
+
+ if (rcu_is_watching()) {
+ __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+ return;
+ }
+
+ /*
+ * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
+ * but if the above rcu_is_watching() failed, then the NMI
+ * triggered someplace critical, and rcu_irq_enter() should
+ * not be called from NMI.
+ */
+ if (unlikely(in_nmi()))
+ return;
+
+ /*
+ * It is possible that a function is being traced in a
+ * location that RCU is not watching. A call to
+ * rcu_irq_enter() will make sure that it is, but there
+ * are a few internal rcu functions that could be traced
+ * where that won't work either. In those cases, we just
+ * do nothing.
+ */
+ if (unlikely(rcu_irq_enter_disabled()))
+ return;
+
+ rcu_irq_enter_irqson();
+ __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+ rcu_irq_exit_irqson();
}
/**
@@ -7550,6 +7579,7 @@
}
tracing_set_nop(tr);
+ clear_ftrace_function_probes(tr);
event_trace_del_tracer(tr);
ftrace_clear_pids(tr);
ftrace_destroy_function_files(tr);
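
The instance teardown above now calls clear_ftrace_function_probes(), which walks tr->func_probes with list_for_each_entry_safe() because every unregister call removes the entry being visited. Below is a self-contained userspace sketch of why the _safe variant matters; the list macros are simplified stand-ins for <linux/list.h>, not the kernel's exact definitions:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    /* Pre-fetch the next node so deleting/freeing 'pos' is safe. */
    #define list_for_each_entry_safe(pos, n, head, member)                  \
            for (pos = list_entry((head)->next, typeof(*pos), member),      \
                 n = list_entry(pos->member.next, typeof(*pos), member);    \
                 &pos->member != (head);                                    \
                 pos = n, n = list_entry(n->member.next, typeof(*n), member))

    static void list_del(struct list_head *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
    }

    struct probe { int id; struct list_head list; };

    int main(void)
    {
            struct list_head head = { &head, &head };
            struct probe *p, *n;

            for (int i = 0; i < 3; i++) {    /* build a three-entry list */
                    p = malloc(sizeof(*p));
                    p->id = i;
                    p->list.next = &head;    /* insert at tail */
                    p->list.prev = head.prev;
                    head.prev->next = &p->list;
                    head.prev = &p->list;
            }

            /* Freeing 'p' inside the body is fine: 'n' was fetched first. */
            list_for_each_entry_safe(p, n, &head, list) {
                    printf("unregistering probe %d\n", p->id);
                    list_del(&p->list);
                    free(p);
            }
            return 0;
    }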
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 291a1bc..39fd773 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -980,6 +980,7 @@
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
struct ftrace_probe_ops *ops);
+extern void clear_ftrace_function_probes(struct trace_array *tr);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -998,6 +999,10 @@
{
return -EINVAL;
}
+static inline void clear_ftrace_function_probes(struct trace_array *tr)
+{
+}
+
/*
* The ops parameter passed in is usually undefined.
* This must be a macro.
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8485f67..c129fca 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1535,6 +1535,11 @@
end:
release_all_trace_kprobes();
+ /*
+ * Wait for the optimizer work to finish. Otherwise it might fiddle
+ * with probes in already freed __init text.
+ */
+ wait_for_kprobe_optimizer();
if (warn)
pr_cont("NG: Some tests are failed. Please check them.\n");
else
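
The rule this hunk enforces is general: before freeing memory that deferred work may still dereference, wait for that work to complete. A userspace analogue, illustrative only — pthread_join() plays the role of wait_for_kprobe_optimizer(), and init_text stands in for the soon-to-be-freed __init text (both names below are made up):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *init_text;    /* stands in for soon-to-be-freed __init text */

    /* Deferred work that still dereferences init_text, like the delayed
     * kprobe_optimizer work touching probes in __init text. */
    static void *optimizer_work(void *arg)
    {
            printf("optimizer sees: %s\n", init_text);
            return NULL;
    }

    int main(void)
    {
            pthread_t worker;

            init_text = strdup("probe in init text");
            if (pthread_create(&worker, NULL, optimizer_work, NULL))
                    return 1;

            /* Without this wait, the free() below races with the worker and
             * is a use-after-free -- the same shape of bug fixed above. */
            pthread_join(worker, NULL);

            free(init_text);    /* safe: the deferred work has finished */
            return 0;
    }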
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index 32e6211..7175811 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -58,7 +58,7 @@
;;
--verbose|-v|-vv)
VERBOSE=$((VERBOSE + 1))
- [ $1 == '-vv' ] && VERBOSE=$((VERBOSE + 1))
+ [ $1 = '-vv' ] && VERBOSE=$((VERBOSE + 1))
shift 1
;;
--debug|-d)
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
index 07bb3e5..aa31368 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
@@ -48,7 +48,7 @@
e=`cat $EVENT_ENABLE`
if [ "$e" != $val ]; then
echo "Expected $val but found $e"
- exit -1
+ exit 1
fi
}
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index 9aec6fc..f2019b3 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -34,10 +34,10 @@
echo > set_ftrace_filter
grep -v '^#' set_ftrace_filter | while read t; do
tr=`echo $t | cut -d: -f2`
- if [ "$tr" == "" ]; then
+ if [ "$tr" = "" ]; then
continue
fi
- if [ $tr == "enable_event" -o $tr == "disable_event" ]; then
+ if [ $tr = "enable_event" -o $tr = "disable_event" ]; then
tr=`echo $t | cut -d: -f1-4`
limit=`echo $t | cut -d: -f5`
else
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
index 4c5a061..c73db78 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
@@ -75,9 +75,13 @@
if [ -d foo ]; then
fail "foo still exists"
fi
-exit 0
-
+mkdir foo
+echo "schedule:enable_event:sched:sched_switch" > foo/set_ftrace_filter
+rmdir foo
+if [ -d foo ]; then
+ fail "foo still exists"
+fi
instance_slam() {