Diffstat (limited to 'kernel/arch/powerpc/oprofile/cell')
-rw-r--r--	kernel/arch/powerpc/oprofile/cell/pr_util.h	114
-rw-r--r--	kernel/arch/powerpc/oprofile/cell/spu_profiler.c	252
-rw-r--r--	kernel/arch/powerpc/oprofile/cell/spu_task_sync.c	661
-rw-r--r--	kernel/arch/powerpc/oprofile/cell/vma_map.c	283
4 files changed, 1310 insertions, 0 deletions
diff --git a/kernel/arch/powerpc/oprofile/cell/pr_util.h b/kernel/arch/powerpc/oprofile/cell/pr_util.h
new file mode 100644
index 000000000..964b93974
--- /dev/null
+++ b/kernel/arch/powerpc/oprofile/cell/pr_util.h
@@ -0,0 +1,114 @@
+ /*
+ * Cell Broadband Engine OProfile Support
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author: Maynard Johnson <maynardj@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef PR_UTIL_H
+#define PR_UTIL_H
+
+#include <linux/cpumask.h>
+#include <linux/oprofile.h>
+#include <asm/cell-pmu.h>
+#include <asm/cell-regs.h>
+#include <asm/spu.h>
+
+/* Defines used for sync_start */
+#define SKIP_GENERIC_SYNC 0
+#define SYNC_START_ERROR -1
+#define DO_GENERIC_SYNC 1
+#define SPUS_PER_NODE 8
+#define DEFAULT_TIMER_EXPIRE (HZ / 10)
+
+extern struct delayed_work spu_work;
+extern int spu_prof_running;
+
+#define TRACE_ARRAY_SIZE 1024
+
+extern spinlock_t oprof_spu_smpl_arry_lck;
+
+struct spu_overlay_info { /* map of sections within an SPU overlay */
+ unsigned int vma; /* SPU virtual memory address from elf */
+ unsigned int size; /* size of section from elf */
+ unsigned int offset; /* offset of section into elf file */
+	unsigned int buf;	/* overlay buffer number for this section (1-based) */
+};
+
+struct vma_to_fileoffset_map { /* map of sections within an SPU program */
+ struct vma_to_fileoffset_map *next; /* list pointer */
+ unsigned int vma; /* SPU virtual memory address from elf */
+ unsigned int size; /* size of section from elf */
+ unsigned int offset; /* offset of section into elf file */
+ unsigned int guard_ptr;
+ unsigned int guard_val;
+ /*
+ * The guard pointer is an entry in the _ovly_buf_table,
+ * computed using ovly.buf as the index into the table. Since
+ * ovly.buf values begin at '1' to reference the first (or 0th)
+ * entry in the _ovly_buf_table, the computation subtracts 1
+ * from ovly.buf.
+ * The guard value is stored in the _ovly_buf_table entry and
+ * is an index (starting at 1) back to the _ovly_table entry
+ * that is pointing at this _ovly_buf_table entry. So, for
+ * example, for an overlay scenario with one overlay segment
+ * and two overlay sections:
+ * - Section 1 points to the first entry of the
+ * _ovly_buf_table, which contains a guard value
+ * of '1', referencing the first (index=0) entry of
+ * _ovly_table.
+ * - Section 2 points to the second entry of the
+ * _ovly_buf_table, which contains a guard value
+ * of '2', referencing the second (index=1) entry of
+ * _ovly_table.
+ */
+
+};
+
+struct spu_buffer {
+ int last_guard_val;
+ int ctx_sw_seen;
+ unsigned long *buff;
+ unsigned int head, tail;
+};
+
+
+/* The three functions below are for maintaining and accessing
+ * the vma-to-fileoffset map.
+ */
+struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
+ unsigned long objectid);
+unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
+ unsigned int vma, const struct spu *aSpu,
+ int *grd_val);
+void vma_map_free(struct vma_to_fileoffset_map *map);
+
+/*
+ * Entry point for SPU profiling.
+ * cycles_reset is the SPU_CYCLES count value specified by the user.
+ */
+int start_spu_profiling_cycles(unsigned int cycles_reset);
+void start_spu_profiling_events(void);
+
+void stop_spu_profiling_cycles(void);
+void stop_spu_profiling_events(void);
+
+/* add the necessary profiling hooks */
+int spu_sync_start(void);
+
+/* remove the hooks */
+int spu_sync_stop(void);
+
+/* Record SPU program counter samples to the oprofile event buffer. */
+void spu_sync_buffer(int spu_num, unsigned int *samples,
+ int num_samples);
+
+void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset);
+
+#endif /* PR_UTIL_H */
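
The guard-pointer scheme described in the vma_to_fileoffset_map comment above can be sanity-checked in user space. The sketch below is illustrative only: the table contents are invented, and only the indexing rule ((buf - 1) * 4 for the guard pointer, i + 1 for the guard value) mirrors the kernel code.

#include <stdio.h>

int main(void)
{
	/* One overlay buffer. Its _ovly_buf_table entry names the
	 * _ovly_table entry (1-based) currently resident in it;
	 * here, section 2 is loaded.
	 */
	unsigned int ovly_buf_table[1] = { 2 };

	/* _ovly_table entry i = 1 (0-based, i.e. section 2), using
	 * overlay buffer number buf = 1 (1-based).
	 */
	unsigned int i = 1, buf = 1;

	/* The kernel computes, for each overlay map entry:
	 *   guard_ptr = ovly_buf_table_sym + (buf - 1) * 4
	 *   guard_val = i + 1
	 * The base symbol is omitted here; we index the array directly.
	 */
	unsigned int guard_idx = buf - 1;
	unsigned int guard_val = i + 1;

	/* keep a sample only while its section is actually resident */
	printf("sample %s\n", ovly_buf_table[guard_idx] == guard_val
	       ? "kept" : "dropped");
	return 0;
}
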
diff --git a/kernel/arch/powerpc/oprofile/cell/spu_profiler.c b/kernel/arch/powerpc/oprofile/cell/spu_profiler.c
new file mode 100644
index 000000000..b129d007e
--- /dev/null
+++ b/kernel/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -0,0 +1,252 @@
+/*
+ * Cell Broadband Engine OProfile Support
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Authors: Maynard Johnson <maynardj@us.ibm.com>
+ * Carl Love <carll@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/hrtimer.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <asm/cell-pmu.h>
+#include <asm/time.h>
+#include "pr_util.h"
+
+#define SCALE_SHIFT 14
+
+static u32 *samples;
+
+/* spu_prof_running is a flag used to indicate if spu profiling is enabled
+ * or not. It is set by the routines start_spu_profiling_cycles() and
+ * start_spu_profiling_events(). The flag is cleared by the routines
+ * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These
+ * routines are called via global_start() and global_stop() which are called in
+ * op_powerpc_start() and op_powerpc_stop(). These routines are called once
+ * per system as a result of the user starting/stopping oprofile. Hence, only
+ * one CPU per user at a time will be changing the value of spu_prof_running.
+ * In general, OProfile does not protect against multiple users trying to run
+ * OProfile at a time.
+ */
+int spu_prof_running;
+static unsigned int profiling_interval;
+
+#define NUM_SPU_BITS_TRBUF 16
+#define SPUS_PER_TB_ENTRY 4
+
+#define SPU_PC_MASK 0xFFFF
+
+DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
+unsigned long oprof_spu_smpl_arry_lck_flags;
+
+void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
+{
+ unsigned long ns_per_cyc;
+
+ if (!freq_khz)
+ freq_khz = ppc_proc_freq/1000;
+
+ /* To calculate a timeout in nanoseconds, the basic
+ * formula is ns = cycles_reset * (NSEC_PER_SEC / cpu frequency).
+ * To avoid floating point math, we use the scale math
+ * technique as described in linux/jiffies.h. We use
+ * a scale factor of SCALE_SHIFT, which provides 4 decimal places
+ * of precision. This is close enough for the purpose at hand.
+ *
+ * The value of the timeout should be small enough that the hw
+ * trace buffer will not get more than about 1/3 full for the
+	 * maximum hw sampling frequency the user can specify (via the
+	 * LFSR value). This ensures the trace buffer never fills, even
+	 * if kernel thread scheduling varies under a heavy system load.
+ */
+
+ ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT)/freq_khz;
+ profiling_interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
+
+}
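
A worked instance of the scaled-integer math above, assuming a 3.2 GHz clock (freq_khz = 3200000) and an illustrative cycles_reset of 100000; the shift needs a 64-bit unsigned long, as on ppc64:

#include <stdio.h>

#define SCALE_SHIFT 14
#define USEC_PER_SEC 1000000UL

int main(void)
{
	unsigned long freq_khz = 3200000;	/* assumed 3.2 GHz clock */
	unsigned long cycles_reset = 100000;	/* illustrative count */

	/* ns per cycle, scaled by 2^14:
	 * (10^6 << 14) / 3200000 = 5120, i.e. 5120 / 2^14 = 0.3125 ns
	 */
	unsigned long ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT) / freq_khz;

	/* (5120 * 100000) >> 14 = 31250 ns between timer expirations */
	unsigned long interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;

	printf("scaled ns_per_cyc=%lu interval=%lu ns\n", ns_per_cyc, interval);
	return 0;
}
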
+
+/*
+ * Extract SPU PC from trace buffer entry
+ */
+static void spu_pc_extract(int cpu, int entry)
+{
+ /* the trace buffer is 128 bits */
+ u64 trace_buffer[2];
+ u64 spu_mask;
+ int spu;
+
+ spu_mask = SPU_PC_MASK;
+
+ /* Each SPU PC is 16 bits; hence, four spus in each of
+ * the two 64-bit buffer entries that make up the
+ * 128-bit trace_buffer entry. Process two 64-bit values
+ * simultaneously.
+ * trace[0] SPU PC contents are: 0 1 2 3
+ * trace[1] SPU PC contents are: 4 5 6 7
+ */
+
+ cbe_read_trace_buffer(cpu, trace_buffer);
+
+ for (spu = SPUS_PER_TB_ENTRY-1; spu >= 0; spu--) {
+ /* spu PC trace entry is upper 16 bits of the
+ * 18 bit SPU program counter
+ */
+ samples[spu * TRACE_ARRAY_SIZE + entry]
+ = (spu_mask & trace_buffer[0]) << 2;
+ samples[(spu + SPUS_PER_TB_ENTRY) * TRACE_ARRAY_SIZE + entry]
+ = (spu_mask & trace_buffer[1]) << 2;
+
+ trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;
+ trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;
+ }
+}
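
The packing that spu_pc_extract() undoes can be demonstrated stand-alone: four 16-bit PCs per 64-bit word, each shifted left two bits to rebuild the 18-bit program counter. The trace word below is made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t word = 0x0001000200030004ULL;	/* made-up PCs, SPUs 0..3 */
	unsigned int pc[4];
	int spu;

	/* peel PCs off the low end, SPU 3 first, as the kernel loop does */
	for (spu = 3; spu >= 0; spu--) {
		pc[spu] = (unsigned int)(word & 0xFFFF) << 2; /* 16 -> 18 bits */
		word >>= 16;
	}

	for (spu = 0; spu < 4; spu++)
		printf("SPU %d PC = 0x%x\n", spu, pc[spu]);
	return 0;
}
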
+
+static int cell_spu_pc_collection(int cpu)
+{
+ u32 trace_addr;
+ int entry;
+
+ /* process the collected SPU PC for the node */
+
+ entry = 0;
+
+ trace_addr = cbe_read_pm(cpu, trace_address);
+ while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
+ /* there is data in the trace buffer to process */
+ spu_pc_extract(cpu, entry);
+
+ entry++;
+
+ if (entry >= TRACE_ARRAY_SIZE)
+ /* spu_samples is full */
+ break;
+
+ trace_addr = cbe_read_pm(cpu, trace_address);
+ }
+
+ return entry;
+}
+
+
+static enum hrtimer_restart profile_spus(struct hrtimer *timer)
+{
+ ktime_t kt;
+ int cpu, node, k, num_samples, spu_num;
+
+ if (!spu_prof_running)
+ goto stop;
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ node = cbe_cpu_to_node(cpu);
+
+		/* There should only be one kernel thread at a time
+		 * processing the samples. In the very unlikely case that
+		 * processing takes a long time and multiple kernel threads
+		 * have been started, make sure only one of them works on
+		 * the samples array at a time. The sample array must be
+		 * loaded and then processed for a given cpu; it is not
+		 * a per-cpu array.
+ */
+ spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
+ num_samples = cell_spu_pc_collection(cpu);
+
+ if (num_samples == 0) {
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
+ continue;
+ }
+
+ for (k = 0; k < SPUS_PER_NODE; k++) {
+ spu_num = k + (node * SPUS_PER_NODE);
+ spu_sync_buffer(spu_num,
+ samples + (k * TRACE_ARRAY_SIZE),
+ num_samples);
+ }
+
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
+
+ }
+	smp_wmb(); /* ensure spu event buffer updates are written */
+ /* don't want events intermingled... */
+
+ kt = ktime_set(0, profiling_interval);
+ if (!spu_prof_running)
+ goto stop;
+ hrtimer_forward(timer, timer->base->get_time(), kt);
+ return HRTIMER_RESTART;
+
+ stop:
+ printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n");
+ return HRTIMER_NORESTART;
+}
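
The re-arm idiom used by profile_spus(), hrtimer_forward() followed by returning HRTIMER_RESTART, is the standard way to build a periodic hrtimer. A bare-bones module sketch of just that pattern, with a made-up 100 microsecond period, might look like this:

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;
static int demo_running = 1;

static enum hrtimer_restart demo_fn(struct hrtimer *t)
{
	if (!demo_running)
		return HRTIMER_NORESTART;
	/* push the expiry forward by one period, relative to "now" */
	hrtimer_forward(t, t->base->get_time(), ktime_set(0, 100000));
	return HRTIMER_RESTART;
}

static int __init demo_init(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_fn;
	hrtimer_start(&demo_timer, ktime_set(0, 100000), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	demo_running = 0;
	hrtimer_cancel(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
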
+
+static struct hrtimer timer;
+/*
+ * Entry point for SPU cycle profiling.
+ * NOTE: SPU profiling is done system-wide, not per-CPU.
+ *
+ * cycles_reset is the count value specified by the user when
+ * setting up OProfile to count SPU_CYCLES.
+ */
+int start_spu_profiling_cycles(unsigned int cycles_reset)
+{
+ ktime_t kt;
+
+ pr_debug("timer resolution: %lu\n", TICK_NSEC);
+ kt = ktime_set(0, profiling_interval);
+ hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&timer, kt);
+ timer.function = profile_spus;
+
+ /* Allocate arrays for collecting SPU PC samples */
+ samples = kzalloc(SPUS_PER_NODE *
+ TRACE_ARRAY_SIZE * sizeof(u32), GFP_KERNEL);
+
+ if (!samples)
+ return -ENOMEM;
+
+ spu_prof_running = 1;
+ hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
+ schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+
+ return 0;
+}
+
+/*
+ * Entry point for SPU event profiling.
+ * NOTE: SPU profiling is done system-wide, not per-CPU.
+ *
+ * Sample collection for SPU events is driven from the Cell PMU
+ * code; here we just flag profiling as running and kick spu_work.
+ */
+void start_spu_profiling_events(void)
+{
+ spu_prof_running = 1;
+ schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+
+ return;
+}
+
+void stop_spu_profiling_cycles(void)
+{
+ spu_prof_running = 0;
+ hrtimer_cancel(&timer);
+ kfree(samples);
+ pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
+}
+
+void stop_spu_profiling_events(void)
+{
+ spu_prof_running = 0;
+}
diff --git a/kernel/arch/powerpc/oprofile/cell/spu_task_sync.c b/kernel/arch/powerpc/oprofile/cell/spu_task_sync.c
new file mode 100644
index 000000000..ed7b09770
--- /dev/null
+++ b/kernel/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -0,0 +1,661 @@
+/*
+ * Cell Broadband Engine OProfile Support
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author: Maynard Johnson <maynardj@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* The purpose of this file is to handle SPU event task switching
+ * and to record SPU context information into the OProfile
+ * event buffer.
+ *
+ * Additionally, the spu_sync_buffer function is provided as a helper
+ * for recording actual SPU program counter samples to the event buffer.
+ */
+#include <linux/dcookies.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/numa.h>
+#include <linux/oprofile.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include "pr_util.h"
+
+#define RELEASE_ALL 9999
+
+static DEFINE_SPINLOCK(buffer_lock);
+static DEFINE_SPINLOCK(cache_lock);
+static int num_spu_nodes;
+int spu_prof_num_nodes;
+
+struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
+struct delayed_work spu_work;
+static unsigned max_spu_buff;
+
+static void spu_buff_add(unsigned long int value, int spu)
+{
+	/* spu buff is a circular buffer. Add entries to the
+	 * head. Head is the index to store the next value.
+	 * The buffer is considered full when only one free entry
+	 * remains in the queue, i.e. head is never allowed to
+	 * advance onto tail. That way head == tail unambiguously
+	 * means the buffer is empty rather than full.
+	 *
+	 * ASSUMPTION: the buffer_lock is held when this function
+	 * is called to lock the buffer, head and tail.
+ */
+ int full = 1;
+
+ if (spu_buff[spu].head >= spu_buff[spu].tail) {
+ if ((spu_buff[spu].head - spu_buff[spu].tail)
+ < (max_spu_buff - 1))
+ full = 0;
+
+ } else if (spu_buff[spu].tail > spu_buff[spu].head) {
+ if ((spu_buff[spu].tail - spu_buff[spu].head)
+ > 1)
+ full = 0;
+ }
+
+ if (!full) {
+ spu_buff[spu].buff[spu_buff[spu].head] = value;
+ spu_buff[spu].head++;
+
+ if (spu_buff[spu].head >= max_spu_buff)
+ spu_buff[spu].head = 0;
+ } else {
+ /* From the user's perspective make the SPU buffer
+ * size management/overflow look like we are using
+ * per cpu buffers. The user uses the same
+ * per cpu parameter to adjust the SPU buffer size.
+ * Increment the sample_lost_overflow to inform
+ * the user the buffer size needs to be increased.
+ */
+ oprofile_cpu_buffer_inc_smpl_lost();
+ }
+}
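
The full/empty test above reserves one slot so that head == tail always means empty. A tiny user-space model of the same invariant, with an illustrative 4-slot buffer:

#include <stdio.h>

#define MAX_BUFF 4	/* illustrative: holds at most 3 entries */

static unsigned int head, tail;

static int buff_is_full(void)
{
	if (head >= tail)
		return (head - tail) >= (MAX_BUFF - 1);
	return (tail - head) <= 1;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++) {
		if (buff_is_full()) {
			printf("add %d: dropped (buffer full)\n", i);
			continue;
		}
		head = (head + 1) % MAX_BUFF;
		printf("add %d: head=%u tail=%u\n", i, head, tail);
	}
	return 0;
}
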
+
+/* This function copies the per SPU buffers to the
+ * OProfile kernel buffer.
+ */
+void sync_spu_buff(void)
+{
+ int spu;
+ unsigned long flags;
+ int curr_head;
+
+ for (spu = 0; spu < num_spu_nodes; spu++) {
+ /* In case there was an issue and the buffer didn't
+		 * get created, skip it.
+ */
+ if (spu_buff[spu].buff == NULL)
+ continue;
+
+ /* Hold the lock to make sure the head/tail
+ * doesn't change while spu_buff_add() is
+ * deciding if the buffer is full or not.
+ * Being a little paranoid.
+ */
+ spin_lock_irqsave(&buffer_lock, flags);
+ curr_head = spu_buff[spu].head;
+ spin_unlock_irqrestore(&buffer_lock, flags);
+
+ /* Transfer the current contents to the kernel buffer.
+ * data can still be added to the head of the buffer.
+ */
+ oprofile_put_buff(spu_buff[spu].buff,
+ spu_buff[spu].tail,
+ curr_head, max_spu_buff);
+
+ spin_lock_irqsave(&buffer_lock, flags);
+ spu_buff[spu].tail = curr_head;
+ spin_unlock_irqrestore(&buffer_lock, flags);
+ }
+
+}
+
+static void wq_sync_spu_buff(struct work_struct *work)
+{
+ /* move data from spu buffers to kernel buffer */
+ sync_spu_buff();
+
+ /* only reschedule if profiling is not done */
+ if (spu_prof_running)
+ schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+}
+
+/* Container for caching information about an active SPU task. */
+struct cached_info {
+ struct vma_to_fileoffset_map *map;
+ struct spu *the_spu; /* needed to access pointer to local_store */
+ struct kref cache_ref;
+};
+
+static struct cached_info *spu_info[MAX_NUMNODES * SPUS_PER_NODE];
+
+static void destroy_cached_info(struct kref *kref)
+{
+ struct cached_info *info;
+
+ info = container_of(kref, struct cached_info, cache_ref);
+ vma_map_free(info->map);
+ kfree(info);
+ module_put(THIS_MODULE);
+}
+
+/* Return the cached_info for the passed SPU number.
+ * ATTENTION: Callers are responsible for obtaining the
+ * cache_lock if needed prior to invoking this function.
+ */
+static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
+{
+ struct kref *ref;
+ struct cached_info *ret_info;
+
+ if (spu_num >= num_spu_nodes) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: Invalid index %d into spu info cache\n",
+ __func__, __LINE__, spu_num);
+ ret_info = NULL;
+ goto out;
+ }
+ if (!spu_info[spu_num] && the_spu) {
+ ref = spu_get_profile_private_kref(the_spu->ctx);
+ if (ref) {
+ spu_info[spu_num] = container_of(ref, struct cached_info, cache_ref);
+ kref_get(&spu_info[spu_num]->cache_ref);
+ }
+ }
+
+ ret_info = spu_info[spu_num];
+ out:
+ return ret_info;
+}
+
+
+/* Looks for cached info for the passed spu. If not found, the
+ * cached info is created for the passed spu.
+ * Returns 0 for success; otherwise, -1 for error.
+ */
+static int
+prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
+{
+ unsigned long flags;
+ struct vma_to_fileoffset_map *new_map;
+ int retval = 0;
+ struct cached_info *info;
+
+ /* We won't bother getting cache_lock here since
+ * don't do anything with the cached_info that's returned.
+ */
+ info = get_cached_info(spu, spu->number);
+
+ if (info) {
+ pr_debug("Found cached SPU info.\n");
+ goto out;
+ }
+
+ /* Create cached_info and set spu_info[spu->number] to point to it.
+ * spu->number is a system-wide value, not a per-node value.
+ */
+ info = kzalloc(sizeof(struct cached_info), GFP_KERNEL);
+ if (!info) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: create vma_map failed\n",
+ __func__, __LINE__);
+ retval = -ENOMEM;
+ goto err_alloc;
+ }
+ new_map = create_vma_map(spu, objectId);
+ if (!new_map) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: create vma_map failed\n",
+ __func__, __LINE__);
+ retval = -ENOMEM;
+ goto err_alloc;
+ }
+
+ pr_debug("Created vma_map\n");
+ info->map = new_map;
+ info->the_spu = spu;
+ kref_init(&info->cache_ref);
+ spin_lock_irqsave(&cache_lock, flags);
+ spu_info[spu->number] = info;
+ /* Increment count before passing off ref to SPUFS. */
+ kref_get(&info->cache_ref);
+
+ /* We increment the module refcount here since SPUFS is
+ * responsible for the final destruction of the cached_info,
+ * and it must be able to access the destroy_cached_info()
+ * function defined in the OProfile module. We decrement
+ * the module refcount in destroy_cached_info.
+ */
+ try_module_get(THIS_MODULE);
+ spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
+ destroy_cached_info);
+ spin_unlock_irqrestore(&cache_lock, flags);
+ goto out;
+
+err_alloc:
+ kfree(info);
+out:
+ return retval;
+}
+
+/*
+ * NOTE: The caller is responsible for locking the
+ * cache_lock prior to calling this function.
+ */
+static int release_cached_info(int spu_index)
+{
+ int index, end;
+
+ if (spu_index == RELEASE_ALL) {
+ end = num_spu_nodes;
+ index = 0;
+ } else {
+ if (spu_index >= num_spu_nodes) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: "
+ "Invalid index %d into spu info cache\n",
+ __func__, __LINE__, spu_index);
+ goto out;
+ }
+ end = spu_index + 1;
+ index = spu_index;
+ }
+ for (; index < end; index++) {
+ if (spu_info[index]) {
+ kref_put(&spu_info[index]->cache_ref,
+ destroy_cached_info);
+ spu_info[index] = NULL;
+ }
+ }
+
+out:
+ return 0;
+}
+
+/* The source code for fast_get_dcookie was "borrowed"
+ * from drivers/oprofile/buffer_sync.c.
+ */
+
+/* Optimisation. We can manage without taking the dcookie sem
+ * because we cannot reach this code without at least one
+ * dcookie user still being registered (namely, the reader
+ * of the event buffer).
+ */
+static inline unsigned long fast_get_dcookie(struct path *path)
+{
+ unsigned long cookie;
+
+ if (path->dentry->d_flags & DCACHE_COOKIE)
+ return (unsigned long)path->dentry;
+ get_dcookie(path, &cookie);
+ return cookie;
+}
+
+/* Look up the dcookie for the task's mm->exe_file,
+ * which corresponds loosely to "application name". Also, determine
+ * the offset for the SPU ELF object. If the computed offset is
+ * non-zero, it implies an embedded SPU object; otherwise, it's a
+ * separate SPU binary, in which case we retrieve its dcookie.
+ * For the embedded case, we must determine if SPU ELF is embedded
+ * in the executable application or another file (i.e., shared lib).
+ * If embedded in a shared lib, we must get the dcookie and return
+ * that to the caller.
+ */
+static unsigned long
+get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
+ unsigned long *spu_bin_dcookie,
+ unsigned long spu_ref)
+{
+ unsigned long app_cookie = 0;
+ unsigned int my_offset = 0;
+ struct vm_area_struct *vma;
+ struct file *exe_file;
+ struct mm_struct *mm = spu->mm;
+
+ if (!mm)
+ goto out;
+
+ exe_file = get_mm_exe_file(mm);
+ if (exe_file) {
+ app_cookie = fast_get_dcookie(&exe_file->f_path);
+ pr_debug("got dcookie for %pD\n", exe_file);
+ fput(exe_file);
+ }
+
+ down_read(&mm->mmap_sem);
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
+			continue;
+		my_offset = spu_ref - vma->vm_start;
+		if (!vma->vm_file)
+			goto fail_no_image_cookie;
+
+		pr_debug("Found spu ELF at %X(object-id:%lx) for file %pD\n",
+			 my_offset, spu_ref, vma->vm_file);
+		*offsetp = my_offset;
+		/* get the dcookie now: vma may be NULL after the loop */
+		*spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
+		pr_debug("got dcookie for %pD\n", vma->vm_file);
+		break;
+	}
+
+	up_read(&mm->mmap_sem);
+
+out:
+ return app_cookie;
+
+fail_no_image_cookie:
+ up_read(&mm->mmap_sem);
+
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: Cannot find dcookie for SPU binary\n",
+ __func__, __LINE__);
+ goto out;
+}
+
+
+
+/* This function finds or creates cached context information for the
+ * passed SPU and records SPU context information into the OProfile
+ * event buffer.
+ */
+static int process_context_switch(struct spu *spu, unsigned long objectId)
+{
+ unsigned long flags;
+ int retval;
+ unsigned int offset = 0;
+ unsigned long spu_cookie = 0, app_dcookie;
+
+ retval = prepare_cached_spu_info(spu, objectId);
+ if (retval)
+ goto out;
+
+ /* Get dcookie first because a mutex_lock is taken in that
+ * code path, so interrupts must not be disabled.
+ */
+ app_dcookie = get_exec_dcookie_and_offset(spu, &offset, &spu_cookie, objectId);
+ if (!app_dcookie || !spu_cookie) {
+ retval = -ENOENT;
+ goto out;
+ }
+
+ /* Record context info in event buffer */
+ spin_lock_irqsave(&buffer_lock, flags);
+ spu_buff_add(ESCAPE_CODE, spu->number);
+ spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
+ spu_buff_add(spu->number, spu->number);
+ spu_buff_add(spu->pid, spu->number);
+ spu_buff_add(spu->tgid, spu->number);
+ spu_buff_add(app_dcookie, spu->number);
+ spu_buff_add(spu_cookie, spu->number);
+ spu_buff_add(offset, spu->number);
+
+ /* Set flag to indicate SPU PC data can now be written out. If
+ * the SPU program counter data is seen before an SPU context
+ * record is seen, the postprocessing will fail.
+ */
+ spu_buff[spu->number].ctx_sw_seen = 1;
+
+ spin_unlock_irqrestore(&buffer_lock, flags);
+	smp_wmb(); /* ensure spu event buffer updates are written */
+ /* don't want entries intermingled... */
+out:
+ return retval;
+}
+
+/*
+ * This function is invoked on either a bind_context or unbind_context.
+ * If called for an unbind_context, the val arg is 0; otherwise,
+ * it is the object-id value for the spu context.
+ * The data arg is of type 'struct spu *'.
+ */
+static int spu_active_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ int retval;
+ unsigned long flags;
+ struct spu *the_spu = data;
+
+ pr_debug("SPU event notification arrived\n");
+ if (!val) {
+ spin_lock_irqsave(&cache_lock, flags);
+ retval = release_cached_info(the_spu->number);
+ spin_unlock_irqrestore(&cache_lock, flags);
+ } else {
+ retval = process_context_switch(the_spu, val);
+ }
+ return retval;
+}
+
+static struct notifier_block spu_active = {
+ .notifier_call = spu_active_notify,
+};
+
+static int number_of_online_nodes(void)
+{
+	u32 cpu, tmp;
+ int nodes = 0;
+ for_each_online_cpu(cpu) {
+ tmp = cbe_cpu_to_node(cpu) + 1;
+ if (tmp > nodes)
+ nodes++;
+ }
+ return nodes;
+}
+
+static int oprofile_spu_buff_create(void)
+{
+ int spu;
+
+ max_spu_buff = oprofile_get_cpu_buffer_size();
+
+ for (spu = 0; spu < num_spu_nodes; spu++) {
+ /* create circular buffers to store the data in.
+ * use locks to manage accessing the buffers
+ */
+ spu_buff[spu].head = 0;
+ spu_buff[spu].tail = 0;
+
+ /*
+ * Create a buffer for each SPU. Can't reliably
+		 * create a single buffer for all spus because a large
+		 * enough contiguous allocation may not be available.
+ */
+
+ spu_buff[spu].buff = kzalloc((max_spu_buff
+ * sizeof(unsigned long)),
+ GFP_KERNEL);
+
+ if (!spu_buff[spu].buff) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: oprofile_spu_buff_create "
+ "failed to allocate spu buffer %d.\n",
+ __func__, __LINE__, spu);
+
+ /* release the spu buffers that have been allocated */
+ while (spu >= 0) {
+ kfree(spu_buff[spu].buff);
+				spu_buff[spu].buff = NULL;
+ spu--;
+ }
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/* The main purpose of this function is to synchronize
+ * OProfile with SPUFS by registering to be notified of
+ * SPU task switches.
+ *
+ * NOTE: When profiling SPUs, we must ensure that only
+ * spu_sync_start is invoked and not the generic sync_start
+ * in drivers/oprofile/oprof.c. A return value of
+ * SKIP_GENERIC_SYNC or SYNC_START_ERROR will
+ * accomplish this.
+ */
+int spu_sync_start(void)
+{
+ int spu;
+ int ret = SKIP_GENERIC_SYNC;
+ int register_ret;
+ unsigned long flags = 0;
+
+ spu_prof_num_nodes = number_of_online_nodes();
+	num_spu_nodes = spu_prof_num_nodes * SPUS_PER_NODE;
+ INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);
+
+ /* create buffer for storing the SPU data to put in
+ * the kernel buffer.
+ */
+ ret = oprofile_spu_buff_create();
+ if (ret)
+ goto out;
+
+ spin_lock_irqsave(&buffer_lock, flags);
+ for (spu = 0; spu < num_spu_nodes; spu++) {
+ spu_buff_add(ESCAPE_CODE, spu);
+ spu_buff_add(SPU_PROFILING_CODE, spu);
+ spu_buff_add(num_spu_nodes, spu);
+ }
+ spin_unlock_irqrestore(&buffer_lock, flags);
+
+ for (spu = 0; spu < num_spu_nodes; spu++) {
+ spu_buff[spu].ctx_sw_seen = 0;
+ spu_buff[spu].last_guard_val = 0;
+ }
+
+ /* Register for SPU events */
+ register_ret = spu_switch_event_register(&spu_active);
+ if (register_ret) {
+ ret = SYNC_START_ERROR;
+ goto out;
+ }
+
+ pr_debug("spu_sync_start -- running.\n");
+out:
+ return ret;
+}
+
+/* Record SPU program counter samples to the oprofile event buffer. */
+void spu_sync_buffer(int spu_num, unsigned int *samples,
+ int num_samples)
+{
+ unsigned long long file_offset;
+ unsigned long flags;
+ int i;
+ struct vma_to_fileoffset_map *map;
+ struct spu *the_spu;
+ unsigned long long spu_num_ll = spu_num;
+ unsigned long long spu_num_shifted = spu_num_ll << 32;
+ struct cached_info *c_info;
+
+ /* We need to obtain the cache_lock here because it's
+ * possible that after getting the cached_info, the SPU job
+ * corresponding to this cached_info may end, thus resulting
+ * in the destruction of the cached_info.
+ */
+ spin_lock_irqsave(&cache_lock, flags);
+ c_info = get_cached_info(NULL, spu_num);
+ if (!c_info) {
+ /* This legitimately happens when the SPU task ends before all
+ * samples are recorded.
+ * No big deal -- so we just drop a few samples.
+ */
+ pr_debug("SPU_PROF: No cached SPU contex "
+ "for SPU #%d. Dropping samples.\n", spu_num);
+ goto out;
+ }
+
+ map = c_info->map;
+ the_spu = c_info->the_spu;
+ spin_lock(&buffer_lock);
+ for (i = 0; i < num_samples; i++) {
+ unsigned int sample = *(samples+i);
+ int grd_val = 0;
+ file_offset = 0;
+ if (sample == 0)
+ continue;
+		file_offset = vma_map_lookup(map, sample, the_spu, &grd_val);
+
+ /* If overlays are used by this SPU application, the guard
+ * value is non-zero, indicating which overlay section is in
+		 * use. We need to discard samples taken while an overlay
+		 * switch occurs (i.e., the guard value changes).
+ */
+ if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
+ spu_buff[spu_num].last_guard_val = grd_val;
+ /* Drop the rest of the samples. */
+ break;
+ }
+
+ /* We must ensure that the SPU context switch has been written
+ * out before samples for the SPU. Otherwise, the SPU context
+ * information is not available and the postprocessing of the
+ * SPU PC will fail with no available anonymous map information.
+ */
+ if (spu_buff[spu_num].ctx_sw_seen)
+ spu_buff_add((file_offset | spu_num_shifted),
+ spu_num);
+ }
+ spin_unlock(&buffer_lock);
+out:
+ spin_unlock_irqrestore(&cache_lock, flags);
+}
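
The sample word written above packs the SPU number into the upper 32 bits and the file offset into the lower 32. A stand-alone check with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long long spu_num = 5;		/* made-up SPU number */
	unsigned long long file_offset = 0x280;	/* made-up offset */
	unsigned long long word = (spu_num << 32) | file_offset;

	printf("packed=0x%llx spu=%llu offset=0x%llx\n",
	       word, word >> 32, word & 0xffffffffULL);
	return 0;
}
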
+
+
+int spu_sync_stop(void)
+{
+ unsigned long flags = 0;
+ int ret;
+ int k;
+
+ ret = spu_switch_event_unregister(&spu_active);
+
+ if (ret)
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: spu_switch_event_unregister " \
+ "returned %d\n",
+ __func__, __LINE__, ret);
+
+ /* flush any remaining data in the per SPU buffers */
+ sync_spu_buff();
+
+ spin_lock_irqsave(&cache_lock, flags);
+ ret = release_cached_info(RELEASE_ALL);
+ spin_unlock_irqrestore(&cache_lock, flags);
+
+	/* remove scheduled work queue item rather than waiting
+ * for every queued entry to execute. Then flush pending
+ * system wide buffer to event buffer.
+ */
+ cancel_delayed_work(&spu_work);
+
+ for (k = 0; k < num_spu_nodes; k++) {
+ spu_buff[k].ctx_sw_seen = 0;
+
+ /*
+		 * spu_buff[k].buff will be NULL if there was a problem
+		 * allocating the buffer; kfree() handles NULL safely.
+ */
+ kfree(spu_buff[k].buff);
+		spu_buff[k].buff = NULL;
+ }
+ pr_debug("spu_sync_stop -- done.\n");
+ return ret;
+}
+
diff --git a/kernel/arch/powerpc/oprofile/cell/vma_map.c b/kernel/arch/powerpc/oprofile/cell/vma_map.c
new file mode 100644
index 000000000..c579b1684
--- /dev/null
+++ b/kernel/arch/powerpc/oprofile/cell/vma_map.c
@@ -0,0 +1,283 @@
+/*
+ * Cell Broadband Engine OProfile Support
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author: Maynard Johnson <maynardj@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* The code in this source file is responsible for generating
+ * vma-to-fileOffset maps for both overlay and non-overlay SPU
+ * applications.
+ */
+
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/elf.h>
+#include <linux/slab.h>
+#include "pr_util.h"
+
+
+void vma_map_free(struct vma_to_fileoffset_map *map)
+{
+ while (map) {
+ struct vma_to_fileoffset_map *next = map->next;
+ kfree(map);
+ map = next;
+ }
+}
+
+unsigned int
+vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma,
+ const struct spu *aSpu, int *grd_val)
+{
+ /*
+ * Default the offset to the physical address + a flag value.
+ * Addresses of dynamically generated code can't be found in the vma
+ * map. For those addresses the flagged value will be sent on to
+ * the user space tools so they can be reported rather than just
+ * thrown away.
+ */
+ u32 offset = 0x10000000 + vma;
+ u32 ovly_grd;
+
+ for (; map; map = map->next) {
+ if (vma < map->vma || vma >= map->vma + map->size)
+ continue;
+
+ if (map->guard_ptr) {
+ ovly_grd = *(u32 *)(aSpu->local_store + map->guard_ptr);
+ if (ovly_grd != map->guard_val)
+ continue;
+ *grd_val = ovly_grd;
+ }
+ offset = vma - map->vma + map->offset;
+ break;
+ }
+
+ return offset;
+}
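
A user-space sketch of the lookup above against a hand-built two-entry map (no overlays, so the guard test is skipped); all addresses are made up:

#include <stdio.h>

struct map_ent {
	struct map_ent *next;
	unsigned int vma, size, offset;
};

static unsigned int lookup(struct map_ent *m, unsigned int vma)
{
	unsigned int off = 0x10000000 + vma;	/* miss: flagged value */

	for (; m; m = m->next)
		if (vma >= m->vma && vma < m->vma + m->size) {
			off = vma - m->vma + m->offset;
			break;
		}
	return off;
}

int main(void)
{
	struct map_ent text = { NULL, 0x80, 0x1000, 0x200 };
	struct map_ent data = { &text, 0x2000, 0x400, 0x1400 };

	printf("0x100 -> file offset 0x%x\n", lookup(&data, 0x100));
	printf("0x5000 -> 0x%x (flagged as unmapped)\n", lookup(&data, 0x5000));
	return 0;
}
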
+
+static struct vma_to_fileoffset_map *
+vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma,
+ unsigned int size, unsigned int offset, unsigned int guard_ptr,
+ unsigned int guard_val)
+{
+ struct vma_to_fileoffset_map *new =
+ kzalloc(sizeof(struct vma_to_fileoffset_map), GFP_KERNEL);
+ if (!new) {
+ printk(KERN_ERR "SPU_PROF: %s, line %d: malloc failed\n",
+ __func__, __LINE__);
+ vma_map_free(map);
+ return NULL;
+ }
+
+ new->next = map;
+ new->vma = vma;
+ new->size = size;
+ new->offset = offset;
+ new->guard_ptr = guard_ptr;
+ new->guard_val = guard_val;
+
+ return new;
+}
+
+
+/* Parse SPE ELF header and generate a list of vma_maps.
+ * A pointer to the first vma_map in the generated list
+ * of vma_maps is returned. */
+struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
+ unsigned long __spu_elf_start)
+{
+ static const unsigned char expected[EI_PAD] = {
+ [EI_MAG0] = ELFMAG0,
+ [EI_MAG1] = ELFMAG1,
+ [EI_MAG2] = ELFMAG2,
+ [EI_MAG3] = ELFMAG3,
+ [EI_CLASS] = ELFCLASS32,
+ [EI_DATA] = ELFDATA2MSB,
+ [EI_VERSION] = EV_CURRENT,
+ [EI_OSABI] = ELFOSABI_NONE
+ };
+
+ int grd_val;
+ struct vma_to_fileoffset_map *map = NULL;
+ void __user *spu_elf_start = (void __user *)__spu_elf_start;
+ struct spu_overlay_info ovly;
+ unsigned int overlay_tbl_offset = -1;
+ Elf32_Phdr __user *phdr_start;
+ Elf32_Shdr __user *shdr_start;
+ Elf32_Ehdr ehdr;
+ Elf32_Phdr phdr;
+ Elf32_Shdr shdr, shdr_str;
+ Elf32_Sym sym;
+ int i, j;
+ char name[32];
+
+ unsigned int ovly_table_sym = 0;
+ unsigned int ovly_buf_table_sym = 0;
+ unsigned int ovly_table_end_sym = 0;
+ unsigned int ovly_buf_table_end_sym = 0;
+ struct spu_overlay_info __user *ovly_table;
+ unsigned int n_ovlys;
+
+ /* Get and validate ELF header. */
+
+ if (copy_from_user(&ehdr, spu_elf_start, sizeof (ehdr)))
+ goto fail;
+
+ if (memcmp(ehdr.e_ident, expected, EI_PAD) != 0) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: Unexpected e_ident parsing SPU ELF\n",
+ __func__, __LINE__);
+ goto fail;
+ }
+ if (ehdr.e_machine != EM_SPU) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: Unexpected e_machine parsing SPU ELF\n",
+ __func__, __LINE__);
+ goto fail;
+ }
+ if (ehdr.e_type != ET_EXEC) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: Unexpected e_type parsing SPU ELF\n",
+ __func__, __LINE__);
+ goto fail;
+ }
+ phdr_start = spu_elf_start + ehdr.e_phoff;
+ shdr_start = spu_elf_start + ehdr.e_shoff;
+
+ /* Traverse program headers. */
+ for (i = 0; i < ehdr.e_phnum; i++) {
+ if (copy_from_user(&phdr, phdr_start + i, sizeof(phdr)))
+ goto fail;
+
+ if (phdr.p_type != PT_LOAD)
+ continue;
+		if (phdr.p_flags & (1 << 27)) /* SPU overlay segment */
+ continue;
+
+ map = vma_map_add(map, phdr.p_vaddr, phdr.p_memsz,
+ phdr.p_offset, 0, 0);
+ if (!map)
+ goto fail;
+ }
+
+ pr_debug("SPU_PROF: Created non-overlay maps\n");
+ /* Traverse section table and search for overlay-related symbols. */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ if (copy_from_user(&shdr, shdr_start + i, sizeof(shdr)))
+ goto fail;
+
+ if (shdr.sh_type != SHT_SYMTAB)
+ continue;
+ if (shdr.sh_entsize != sizeof (sym))
+ continue;
+
+ if (copy_from_user(&shdr_str,
+ shdr_start + shdr.sh_link,
+				   sizeof(shdr_str)))
+ goto fail;
+
+ if (shdr_str.sh_type != SHT_STRTAB)
+ goto fail;
+
+ for (j = 0; j < shdr.sh_size / sizeof (sym); j++) {
+ if (copy_from_user(&sym, spu_elf_start +
+ shdr.sh_offset +
+ j * sizeof (sym),
+ sizeof (sym)))
+ goto fail;
+
+ if (copy_from_user(name,
+ spu_elf_start + shdr_str.sh_offset +
+ sym.st_name,
+ 20))
+ goto fail;
+
+ if (memcmp(name, "_ovly_table", 12) == 0)
+ ovly_table_sym = sym.st_value;
+ if (memcmp(name, "_ovly_buf_table", 16) == 0)
+ ovly_buf_table_sym = sym.st_value;
+ if (memcmp(name, "_ovly_table_end", 16) == 0)
+ ovly_table_end_sym = sym.st_value;
+ if (memcmp(name, "_ovly_buf_table_end", 20) == 0)
+ ovly_buf_table_end_sym = sym.st_value;
+ }
+ }
+
+ /* If we don't have overlays, we're done. */
+ if (ovly_table_sym == 0 || ovly_buf_table_sym == 0
+ || ovly_table_end_sym == 0 || ovly_buf_table_end_sym == 0) {
+ pr_debug("SPU_PROF: No overlay table found\n");
+ goto out;
+ } else {
+ pr_debug("SPU_PROF: Overlay table found\n");
+ }
+
+ /* The _ovly_table symbol represents a table with one entry
+ * per overlay section. The _ovly_buf_table symbol represents
+ * a table with one entry per overlay region.
+ * The struct spu_overlay_info gives the structure of the _ovly_table
+ * entries. The structure of _ovly_buf_table is simply one
+ * u32 word per entry.
+ */
+ overlay_tbl_offset = vma_map_lookup(map, ovly_table_sym,
+ aSpu, &grd_val);
+ if (overlay_tbl_offset > 0x10000000) {
+ printk(KERN_ERR "SPU_PROF: "
+ "%s, line %d: Error finding SPU overlay table\n",
+ __func__, __LINE__);
+ goto fail;
+ }
+ ovly_table = spu_elf_start + overlay_tbl_offset;
+
+ n_ovlys = (ovly_table_end_sym -
+ ovly_table_sym) / sizeof (ovly);
+
+ /* Traverse overlay table. */
+ for (i = 0; i < n_ovlys; i++) {
+ if (copy_from_user(&ovly, ovly_table + i, sizeof (ovly)))
+ goto fail;
+
+ /* The ovly.vma/size/offset arguments are analogous to the same
+ * arguments used above for non-overlay maps. The final two
+ * args are referred to as the guard pointer and the guard
+ * value.
+ * The guard pointer is an entry in the _ovly_buf_table,
+ * computed using ovly.buf as the index into the table. Since
+ * ovly.buf values begin at '1' to reference the first (or 0th)
+ * entry in the _ovly_buf_table, the computation subtracts 1
+ * from ovly.buf.
+ * The guard value is stored in the _ovly_buf_table entry and
+ * is an index (starting at 1) back to the _ovly_table entry
+ * that is pointing at this _ovly_buf_table entry. So, for
+ * example, for an overlay scenario with one overlay segment
+ * and two overlay sections:
+ * - Section 1 points to the first entry of the
+ * _ovly_buf_table, which contains a guard value
+ * of '1', referencing the first (index=0) entry of
+ * _ovly_table.
+ * - Section 2 points to the second entry of the
+ * _ovly_buf_table, which contains a guard value
+ * of '2', referencing the second (index=1) entry of
+ * _ovly_table.
+ */
+ map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset,
+ ovly_buf_table_sym + (ovly.buf-1) * 4, i+1);
+ if (!map)
+ goto fail;
+ }
+ goto out;
+
+ fail:
+ map = NULL;
+ out:
+ return map;
+}