author     Yunhong Jiang <yunhong.jiang@linux.intel.com>  2017-03-08 23:13:28 -0800
committer  Yunhong Jiang <yunhong.jiang@linux.intel.com>  2017-03-08 23:36:15 -0800
commit     52f993b8e89487ec9ee15a7fb4979e0f09a45b27 (patch)
tree       d65304486afe0bea4a311c783c0d72791c8c0aa2 /kernel/kernel/trace
parent     c189ccac5702322ed843fe17057035b7222a59b6 (diff)
Upgrade to 4.4.50-rt62
The current kernel is based on the rt kernel v4.4.6-rt14. We will upgrade it to 4.4.50-rt62. The steps to achieve this are:

a) Clone a git repo from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

b) Get the diff between these two changesets:
   git diff 640eca2901f3435e616157b11379d3223a44b391 705619beeea1b0b48219a683fd1a901a86fdaf5e
   where the two commits are:
   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 640eca2901f3435e616157b11379d3223a44b391
   640eca2901f3 v4.4.6-rt14
   localversion-rt
   [yjiang5@jnakajim-build linux-stable-rt]$ git show --oneline --name-only 705619beeea1b0b48219a683fd1a901a86fdaf5e
   705619beeea1 Linux 4.4.50-rt62
   localversion-rt

c) One patch has already been backported, so revert that patch before applying:
   filterdiff -p1 -x scripts/package/Makefile ~/tmp/v4.4.6-rt14-4.4.50-rt62.diff | patch -p1 --dry-run

Upstream status: backport

Change-Id: I244d57a32f6066e5a5b9915f9fbf99e7bbca6e01
Signed-off-by: Yunhong Jiang <yunhong.jiang@linux.intel.com>
Diffstat (limited to 'kernel/kernel/trace')
-rw-r--r--  kernel/kernel/trace/Makefile                |  4
-rw-r--r--  kernel/kernel/trace/ring_buffer.c           | 35
-rw-r--r--  kernel/kernel/trace/trace.c                 | 66
-rw-r--r--  kernel/kernel/trace/trace_events.c          | 17
-rw-r--r--  kernel/kernel/trace/trace_functions_graph.c | 17
-rw-r--r--  kernel/kernel/trace/trace_irqsoff.c         |  8
-rw-r--r--  kernel/kernel/trace/trace_printk.c          | 10
7 files changed, 102 insertions, 55 deletions
diff --git a/kernel/kernel/trace/Makefile b/kernel/kernel/trace/Makefile
index 3bbaea068..bc08c6730 100644
--- a/kernel/kernel/trace/Makefile
+++ b/kernel/kernel/trace/Makefile
@@ -1,4 +1,8 @@
+# We are fully aware of the dangers of __builtin_return_address()
+FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
+KBUILD_CFLAGS += $(FRAME_CFLAGS)
+
# Do not instrument the tracer itself:
ifdef CONFIG_FUNCTION_TRACER
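
For context, a minimal user-space sketch (not part of this patch) of the pattern the new Makefile comment refers to: the tracer reads caller addresses with __builtin_return_address() at non-zero levels, which GCC 6 and later flag with -Wframe-address; the hunk above silences that warning for this directory. Compile with "gcc -Wall" to see the warning.

/*
 * Minimal sketch, not the kernel code: a non-zero-level
 * __builtin_return_address() call of the kind -Wframe-address warns about.
 */
#include <stdio.h>

static __attribute__((noinline)) void report_caller_of_caller(void)
{
	/* Non-zero level: walking past our own frame triggers the warning. */
	printf("two frames up: %p\n", __builtin_return_address(1));
}

static __attribute__((noinline)) void intermediate(void)
{
	report_caller_of_caller();
}

int main(void)
{
	intermediate();
	return 0;
}
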
diff --git a/kernel/kernel/trace/ring_buffer.c b/kernel/kernel/trace/ring_buffer.c
index 9c6045a27..acbb0e73d 100644
--- a/kernel/kernel/trace/ring_buffer.c
+++ b/kernel/kernel/trace/ring_buffer.c
@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
- unsigned int nr_pages;
+ unsigned long nr_pages;
unsigned int current_context;
struct list_head *pages;
struct buffer_page *head_page; /* read from head */
@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
u64 write_stamp;
u64 read_stamp;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
- int nr_pages_to_update;
+ long nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
struct work_struct update_pages_work;
struct completion update_done;
@@ -1137,10 +1137,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
return 0;
}
-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
{
- int i;
struct buffer_page *bpage, *tmp;
+ long i;
for (i = 0; i < nr_pages; i++) {
struct page *page;
@@ -1177,7 +1177,7 @@ free_pages:
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
- unsigned nr_pages)
+ unsigned long nr_pages)
{
LIST_HEAD(pages);
@@ -1202,7 +1202,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
}
static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
@@ -1302,8 +1302,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
struct lock_class_key *key)
{
struct ring_buffer *buffer;
+ long nr_pages;
int bsize;
- int cpu, nr_pages;
+ int cpu;
/* keep it in its own cache line */
buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1429,12 +1430,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
}
static int
-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
struct list_head *tail_page, *to_remove, *next_page;
struct buffer_page *to_remove_page, *tmp_iter_page;
struct buffer_page *last_page, *first_page;
- unsigned int nr_removed;
+ unsigned long nr_removed;
unsigned long head_bit;
int page_entries;
@@ -1651,7 +1652,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
int cpu_id)
{
struct ring_buffer_per_cpu *cpu_buffer;
- unsigned nr_pages;
+ unsigned long nr_pages;
int cpu, err = 0;
/*
@@ -1665,14 +1666,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
!cpumask_test_cpu(cpu_id, buffer->cpumask))
return size;
- size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
- size *= BUF_PAGE_SIZE;
+ nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
/* we need a minimum of two pages */
- if (size < BUF_PAGE_SIZE * 2)
- size = BUF_PAGE_SIZE * 2;
+ if (nr_pages < 2)
+ nr_pages = 2;
- nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+ size = nr_pages * BUF_PAGE_SIZE;
/*
* Don't succeed if resizing is disabled, as a reader might be
@@ -4645,8 +4645,9 @@ static int rb_cpu_notify(struct notifier_block *self,
struct ring_buffer *buffer =
container_of(self, struct ring_buffer, cpu_notify);
long cpu = (long)hcpu;
- int cpu_i, nr_pages_same;
- unsigned int nr_pages;
+ long nr_pages_same;
+ int cpu_i;
+ unsigned long nr_pages;
switch (action) {
case CPU_UP_PREPARE:
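
As a rough illustration (not part of the patch) of why the page-count variables above move from int to long: a resize request arrives as a byte count in an unsigned long, and on 64-bit systems the derived page count can exceed what an int can hold, silently truncating. The numbers below are a hypothetical oversized request chosen only to show the effect.

/*
 * Illustrative sketch, not the kernel code: the truncation the
 * int -> long conversions above avoid.
 */
#include <stdio.h>

#define BUF_PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long size = 3UL << 43;			/* hypothetical huge request, in bytes */
	unsigned long nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	int truncated = (int)nr_pages;			/* what an int field would end up holding */

	printf("nr_pages as unsigned long: %lu\n", nr_pages);
	printf("nr_pages as int:           %d\n", truncated);
	return 0;
}
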
diff --git a/kernel/kernel/trace/trace.c b/kernel/kernel/trace/trace.c
index f1ee84b00..cad1a28bf 100644
--- a/kernel/kernel/trace/trace.c
+++ b/kernel/kernel/trace/trace.c
@@ -2559,17 +2559,17 @@ get_total_entries(struct trace_buffer *buf,
static void print_lat_help_header(struct seq_file *m)
{
- seq_puts(m, "# _--------=> CPU# \n"
- "# / _-------=> irqs-off \n"
- "# | / _------=> need-resched \n"
- "# || / _-----=> need-resched_lazy \n"
- "# ||| / _----=> hardirq/softirq \n"
- "# |||| / _---=> preempt-depth \n"
- "# ||||| / _--=> preempt-lazy-depth\n"
- "# |||||| / _-=> migrate-disable \n"
- "# ||||||| / delay \n"
- "# cmd pid |||||||| time | caller \n"
- "# \\ / |||||||| \\ | / \n");
+ seq_puts(m, "# _--------=> CPU# \n"
+ "# / _-------=> irqs-off \n"
+ "# | / _------=> need-resched \n"
+ "# || / _-----=> need-resched_lazy \n"
+ "# ||| / _----=> hardirq/softirq \n"
+ "# |||| / _---=> preempt-depth \n"
+ "# ||||| / _--=> preempt-lazy-depth\n"
+ "# |||||| / _-=> migrate-disable \n"
+ "# ||||||| / delay \n"
+ "# cmd pid |||||||| time | caller \n"
+ "# \\ / |||||||| \\ | / \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
@@ -2598,11 +2598,11 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
"# |/ _-----=> need-resched_lazy\n"
"# || / _---=> hardirq/softirq\n"
"# ||| / _--=> preempt-depth\n"
- "# |||| /_--=> preempt-lazy-depth\n"
- "# ||||| _-=> migrate-disable \n"
- "# ||||| / delay\n"
- "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n"
- "# | | | |||||| | |\n");
+ "# |||| / _-=> preempt-lazy-depth\n"
+ "# ||||| / _-=> migrate-disable \n"
+ "# |||||| / delay\n"
+ "# TASK-PID CPU# ||||||| TIMESTAMP FUNCTION\n"
+ "# | | | ||||||| | |\n");
}
void
@@ -4737,19 +4737,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
struct trace_iterator *iter = filp->private_data;
ssize_t sret;
- /* return any leftover data */
- sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
- if (sret != -EBUSY)
- return sret;
-
- trace_seq_init(&iter->seq);
-
/*
* Avoid more than one consumer on a single file descriptor
* This is just a matter of traces coherency, the ring buffer itself
* is protected.
*/
mutex_lock(&iter->mutex);
+
+ /* return any leftover data */
+ sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+ if (sret != -EBUSY)
+ goto out;
+
+ trace_seq_init(&iter->seq);
+
if (iter->trace->read) {
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
if (sret)
@@ -4959,7 +4960,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
spd.nr_pages = i;
- ret = splice_to_pipe(pipe, &spd);
+ if (i)
+ ret = splice_to_pipe(pipe, &spd);
+ else
+ ret = 0;
out:
splice_shrink_spd(&spd);
return ret;
@@ -5773,9 +5777,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
return -EBUSY;
#endif
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
-
if (*ppos & (PAGE_SIZE - 1))
return -EINVAL;
@@ -5785,6 +5786,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
len &= PAGE_MASK;
}
+ if (splice_grow_spd(pipe, &spd))
+ return -ENOMEM;
+
again:
trace_access_lock(iter->cpu_file);
entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
@@ -5842,19 +5846,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
/* did we read anything? */
if (!spd.nr_pages) {
if (ret)
- return ret;
+ goto out;
+ ret = -EAGAIN;
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
- return -EAGAIN;
+ goto out;
ret = wait_on_pipe(iter, true);
if (ret)
- return ret;
+ goto out;
goto again;
}
ret = splice_to_pipe(pipe, &spd);
+out:
splice_shrink_spd(&spd);
return ret;
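
The reshuffling in tracing_buffers_splice_read() above moves the alignment check before splice_grow_spd() and replaces early returns with a single exit label, so splice_shrink_spd() runs on every path that reached the grow step. A minimal stand-alone sketch of that pattern (not the kernel code; the helpers and error values are stand-ins):

/*
 * Minimal sketch, not the kernel code: single-exit cleanup.  Once the
 * "grow" step has succeeded, every failure path jumps to the label so
 * the matching "shrink" step is never skipped.
 */
#include <stdio.h>
#include <stdlib.h>

struct pages { void *mem; };

static int grow(struct pages *p)
{
	p->mem = malloc(4096);
	return p->mem ? 0 : -1;
}

static void shrink(struct pages *p)
{
	free(p->mem);
	p->mem = NULL;
}

static long splice_like_read(struct pages *p, long len)
{
	long ret;

	if (len & 4095)
		return -22;		/* -EINVAL: nothing acquired yet, plain return is fine */

	if (grow(p))
		return -12;		/* -ENOMEM */

	if (len == 0) {
		ret = -11;		/* -EAGAIN... */
		goto out;		/* ...but still release the pages */
	}

	ret = len;			/* pretend the data was spliced */
out:
	shrink(p);			/* paired with grow() on every path */
	return ret;
}

int main(void)
{
	struct pages p;

	printf("%ld\n", splice_like_read(&p, 0));	/* -11 */
	printf("%ld\n", splice_like_read(&p, 8192));	/* 8192 */
	return 0;
}
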
diff --git a/kernel/kernel/trace/trace_events.c b/kernel/kernel/trace/trace_events.c
index 0e508e99b..5bd79b347 100644
--- a/kernel/kernel/trace/trace_events.c
+++ b/kernel/kernel/trace/trace_events.c
@@ -246,6 +246,14 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
local_save_flags(fbuffer->flags);
fbuffer->pc = preempt_count();
+ /*
+ * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
+ * preemption (adding one to the preempt_count). Since we are
+ * interested in the preempt_count at the time the tracepoint was
+ * hit, we need to subtract one to offset the increment.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT))
+ fbuffer->pc--;
fbuffer->trace_file = trace_file;
fbuffer->event =
@@ -2109,8 +2117,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
trace_create_file("filter", 0644, file->dir, file,
&ftrace_event_filter_fops);
- trace_create_file("trigger", 0644, file->dir, file,
- &event_trigger_fops);
+ /*
+ * Only event directories that can be enabled should have
+ * triggers.
+ */
+ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ trace_create_file("trigger", 0644, file->dir, file,
+ &event_trigger_fops);
trace_create_file("format", 0444, file->dir, call,
&ftrace_event_format_fops);
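
The comment added in trace_event_buffer_reserve() explains the adjustment: on a CONFIG_PREEMPT kernel the tracepoint wrapper has already incremented preempt_count by the time the event is reserved, so one is subtracted to report the count as it was at the instrumented call site. A toy user-space model of that bookkeeping (not the kernel code; the names are made up):

/*
 * Toy model, not the kernel code: the wrapper itself bumps the
 * counter, so the raw value is one higher than at the traced site.
 */
#include <stdio.h>
#include <stdbool.h>

static int preempt_count;				/* stand-in for the per-CPU counter */
static const bool preemptible_kernel = true;		/* stand-in for IS_ENABLED(CONFIG_PREEMPT) */

static int record_event(void)
{
	int pc;

	preempt_count++;		/* the wrapper disables preemption */

	pc = preempt_count;
	if (preemptible_kernel)
		pc--;			/* undo the wrapper's own increment */

	preempt_count--;		/* the wrapper re-enables preemption */
	return pc;
}

int main(void)
{
	/* The caller had preemption enabled, so the recorded count is 0. */
	printf("recorded preempt_count: %d\n", record_event());
	return 0;
}
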
diff --git a/kernel/kernel/trace/trace_functions_graph.c b/kernel/kernel/trace/trace_functions_graph.c
index a663cbb84..7fd6f5a26 100644
--- a/kernel/kernel/trace/trace_functions_graph.c
+++ b/kernel/kernel/trace/trace_functions_graph.c
@@ -780,6 +780,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+ /* If a graph tracer ignored set_graph_notrace */
+ if (call->depth < -1)
+ call->depth += FTRACE_NOTRACE_DEPTH;
+
/*
* Comments display at + 1 to depth. Since
* this is a leaf function, keep the comments
@@ -788,7 +792,8 @@ print_graph_entry_leaf(struct trace_iterator *iter,
cpu_data->depth = call->depth - 1;
/* No need to keep this function around for this depth */
- if (call->depth < FTRACE_RETFUNC_DEPTH)
+ if (call->depth < FTRACE_RETFUNC_DEPTH &&
+ !WARN_ON_ONCE(call->depth < 0))
cpu_data->enter_funcs[call->depth] = 0;
}
@@ -818,11 +823,16 @@ print_graph_entry_nested(struct trace_iterator *iter,
struct fgraph_cpu_data *cpu_data;
int cpu = iter->cpu;
+ /* If a graph tracer ignored set_graph_notrace */
+ if (call->depth < -1)
+ call->depth += FTRACE_NOTRACE_DEPTH;
+
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
cpu_data->depth = call->depth;
/* Save this function pointer to see if the exit matches */
- if (call->depth < FTRACE_RETFUNC_DEPTH)
+ if (call->depth < FTRACE_RETFUNC_DEPTH &&
+ !WARN_ON_ONCE(call->depth < 0))
cpu_data->enter_funcs[call->depth] = call->func;
}
@@ -1052,7 +1062,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
*/
cpu_data->depth = trace->depth - 1;
- if (trace->depth < FTRACE_RETFUNC_DEPTH) {
+ if (trace->depth < FTRACE_RETFUNC_DEPTH &&
+ !WARN_ON_ONCE(trace->depth < 0)) {
if (cpu_data->enter_funcs[trace->depth] != trace->func)
func_match = 0;
cpu_data->enter_funcs[trace->depth] = 0;
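
The WARN_ON_ONCE() additions above share one purpose: call->depth can go negative when a graph tracer ignored set_graph_notrace, and using it unchecked would index before the start of enter_funcs[]. A simplified sketch of the guard (not the kernel code; the constant and helper are stand-ins):

/*
 * Simplified sketch, not the kernel code: check a possibly-negative
 * depth before using it as an array index.
 */
#include <stdio.h>

#define RETFUNC_DEPTH 50	/* stand-in for FTRACE_RETFUNC_DEPTH */

static unsigned long enter_funcs[RETFUNC_DEPTH];

static void record_entry(int depth, unsigned long func)
{
	/* Both bounds are checked; a negative depth is reported and skipped. */
	if (depth < RETFUNC_DEPTH && depth >= 0)
		enter_funcs[depth] = func;
	else
		fprintf(stderr, "depth %d out of range, not recorded\n", depth);
}

int main(void)
{
	record_entry(3, 0xabcdUL);	/* stored */
	record_entry(-2, 0xdeadUL);	/* would be out of bounds; skipped */
	return 0;
}
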
diff --git a/kernel/kernel/trace/trace_irqsoff.c b/kernel/kernel/trace/trace_irqsoff.c
index 069942c22..553e71254 100644
--- a/kernel/kernel/trace/trace_irqsoff.c
+++ b/kernel/kernel/trace/trace_irqsoff.c
@@ -110,8 +110,12 @@ static int func_prolog_dec(struct trace_array *tr,
return 0;
local_save_flags(*flags);
- /* slight chance to get a false positive on tracing_cpu */
- if (!irqs_disabled_flags(*flags))
+ /*
+ * Slight chance to get a false positive on tracing_cpu,
+ * although I'm starting to think there isn't a chance.
+ * Leave this for now just to be paranoid.
+ */
+ if (!irqs_disabled_flags(*flags) && !preempt_count())
return 0;
*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
diff --git a/kernel/kernel/trace/trace_printk.c b/kernel/kernel/trace/trace_printk.c
index 060df67db..ad1d6164e 100644
--- a/kernel/kernel/trace/trace_printk.c
+++ b/kernel/kernel/trace/trace_printk.c
@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
{
struct trace_bprintk_fmt *pos;
+
+ if (!fmt)
+ return ERR_PTR(-EINVAL);
+
list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
if (!strcmp(pos->fmt, fmt))
return pos;
@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
for (iter = start; iter < end; iter++) {
struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
if (tb_fmt) {
- *iter = tb_fmt->fmt;
+ if (!IS_ERR(tb_fmt))
+ *iter = tb_fmt->fmt;
continue;
}
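
lookup_format() now distinguishes "no format given" from "format not found" by returning an encoded error pointer, and the caller checks IS_ERR() before dereferencing. A small user-space model of that kernel convention (simplified; the real macros live in include/linux/err.h):

/*
 * Simplified model of ERR_PTR()/IS_ERR()/PTR_ERR(): an errno value is
 * encoded in the pointer itself, so callers must test IS_ERR() before
 * dereferencing.  Not the kernel definitions.
 */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static const char *lookup(const char *key)
{
	if (!key)
		return ERR_PTR(-22);	/* -EINVAL: no key at all */
	return NULL;			/* valid "not found" result */
}

int main(void)
{
	const char *res = lookup(NULL);

	if (IS_ERR(res))
		printf("error %ld\n", PTR_ERR(res));	/* error -22 */
	else
		printf("lookup result: %p\n", (void *)res);
	return 0;
}
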
@@ -296,6 +301,9 @@ static int t_show(struct seq_file *m, void *v)
const char *str = *fmt;
int i;
+ if (!*fmt)
+ return 0;
+
seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
/*