--- /dev/null
+spu_save_dump.h
+spu_restore_dump.h
{
struct spu_context *ctx;
ctx = container_of(kref, struct spu_context, kref);
+ spu_context_nospu_trace(destroy_spu_context__enter, ctx);
mutex_lock(&ctx->state_mutex);
spu_deactivate(ctx);
mutex_unlock(&ctx->state_mutex);
kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
BUG_ON(!list_empty(&ctx->rq));
atomic_dec(&nr_spu_contexts);
+ kfree(ctx->switch_log);
kfree(ctx);
}
{
int ret;
+ spu_context_nospu_trace(spu_acquire_saved__enter, ctx);
+
ret = spu_acquire(ctx);
if (ret)
return ret;
.release = single_release,
};
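+/*
+ * The switch log is a fixed-size ring buffer: head is the next entry to
+ * write, tail the next entry to read, and one slot is always left free
+ * so that head == tail unambiguously means "empty".
+ */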
+static inline int spufs_switch_log_used(struct spu_context *ctx)
+{
+ return (ctx->switch_log->head - ctx->switch_log->tail) %
+ SWITCH_LOG_BUFSIZE;
+}
+
+static inline int spufs_switch_log_avail(struct spu_context *ctx)
+{
+ return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
+}
+
+static int spufs_switch_log_open(struct inode *inode, struct file *file)
+{
+ struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+
+ /*
+ * We (ab-)use the mapping_lock here because it serves a similar
+ * purpose for synchronizing open/close elsewhere. Maybe it should
+ * be renamed eventually.
+ */
+ mutex_lock(&ctx->mapping_lock);
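+ /* reopening an existing log simply discards any unread entries */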
+ if (ctx->switch_log) {
+ spin_lock(&ctx->switch_log->lock);
+ ctx->switch_log->head = 0;
+ ctx->switch_log->tail = 0;
+ spin_unlock(&ctx->switch_log->lock);
+ } else {
+ /*
+ * We allocate the switch log data structures on first open.
+ * They are only freed when the context itself is destroyed,
+ * because we assume a context will be traced until it goes away.
+ */
+ ctx->switch_log = kzalloc(sizeof(struct switch_log) +
+ SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+ GFP_KERNEL);
+ if (!ctx->switch_log)
+ goto out;
+ spin_lock_init(&ctx->switch_log->lock);
+ init_waitqueue_head(&ctx->switch_log->wait);
+ }
+ mutex_unlock(&ctx->mapping_lock);
+
+ return 0;
+ out:
+ mutex_unlock(&ctx->mapping_lock);
+ return -ENOMEM;
+}
+
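+/*
+ * Format one log entry as a single text line:
+ *   <sec>.<nsec> <spu_id> <type> <val> <timebase>
+ * which is also the record format seen by readers of "switch_log".
+ */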
+static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
+{
+ struct switch_log_entry *p;
+
+ p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
+
+ return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
+ (unsigned int) p->tstamp.tv_sec,
+ (unsigned int) p->tstamp.tv_nsec,
+ p->spu_id,
+ (unsigned int) p->type,
+ (unsigned int) p->val,
+ (unsigned long long) p->timebase);
+}
+
+static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+ int error = 0, cnt = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ while (cnt < len) {
+ char tbuf[128];
+ int width;
+
+ if (file->f_flags & O_NONBLOCK) {
+ if (spufs_switch_log_used(ctx) <= 0)
+ return cnt ? cnt : -EAGAIN;
+ } else {
+ /* Wait for data in buffer */
+ error = wait_event_interruptible(ctx->switch_log->wait,
+ spufs_switch_log_used(ctx) > 0);
+ if (error)
+ break;
+ }
+
+ spin_lock(&ctx->switch_log->lock);
+ if (ctx->switch_log->head == ctx->switch_log->tail) {
+ /* another reader raced us and drained the log; wait again */
+ spin_unlock(&ctx->switch_log->lock);
+ continue;
+ }
+
+ width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
+ if (width < len - cnt) {
+ ctx->switch_log->tail =
+ (ctx->switch_log->tail + 1) %
+ SWITCH_LOG_BUFSIZE;
+ }
+
+ spin_unlock(&ctx->switch_log->lock);
+
+ /*
+ * If the record does not fit in the space remaining in the
+ * user buffer, return the partial read so far.
+ */
+ if (width >= len - cnt)
+ break;
+
+ if (copy_to_user(buf + cnt, tbuf, width)) {
+ error = -EFAULT;
+ break;
+ }
+ cnt += width;
+ }
+
+ return cnt == 0 ? error : cnt;
+}
+
+static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+ unsigned int mask = 0;
+
+ poll_wait(file, &ctx->switch_log->wait, wait);
+
+ if (spufs_switch_log_used(ctx) > 0)
+ mask |= POLLIN;
+
+ return mask;
+}
+
+static const struct file_operations spufs_switch_log_fops = {
+ .owner = THIS_MODULE,
+ .open = spufs_switch_log_open,
+ .read = spufs_switch_log_read,
+ .poll = spufs_switch_log_poll,
+};
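+
+/*
+ * A minimal user-space consumer of the "switch_log" file (sketch only;
+ * the mount point and context name below are illustrative):
+ *
+ *   int fd = open("/spu/myctx/switch_log", O_RDONLY);
+ *   char buf[128];
+ *   ssize_t n;
+ *   while ((n = read(fd, buf, sizeof(buf))) > 0)
+ *           fwrite(buf, 1, n, stdout);
+ */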
+
+void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
+ u32 type, u32 val)
+{
+ if (!ctx->switch_log)
+ return;
+
+ spin_lock(&ctx->switch_log->lock);
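+ /* keep one slot free so that head == tail always means "empty" */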
+ if (spufs_switch_log_avail(ctx) > 1) {
+ struct switch_log_entry *p;
+
+ p = ctx->switch_log->log + ctx->switch_log->head;
+ ktime_get_ts(&p->tstamp);
+ p->timebase = get_tb();
+ p->spu_id = spu ? spu->number : -1;
+ p->type = type;
+ p->val = val;
+
+ ctx->switch_log->head =
+ (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
+ }
+ spin_unlock(&ctx->switch_log->lock);
+
+ wake_up(&ctx->switch_log->wait);
+}
struct tree_descr spufs_dir_contents[] = {
{ "capabilities", &spufs_caps_fops, 0444, },
{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
{ "tid", &spufs_tid_fops, 0444, },
{ "stat", &spufs_stat_fops, 0444, },
+ { "switch_log", &spufs_switch_log_fops, 0444 },
{},
};
ret = spu_run_fini(ctx, npc, &status);
spu_yield(ctx);
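+ /* record the final stop status as the context leaves spu_run */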
+ spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
+
if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
(((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
ctx->stats.libassist++;
spu->mfc_callback = spufs_mfc_callback;
mb();
spu_unmap_mappings(ctx);
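+ /* note that this context is now being loaded onto the SPU */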
+ spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
spu_restore(&ctx->csa, spu);
spu->timestamp = jiffies;
spu_cpu_affinity_set(spu, raw_smp_processor_id());
spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
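+ /* the context state has been saved; it is leaving the SPU */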
+ spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
spu->timestamp = jiffies;
ctx->state = SPU_STATE_SAVED;
spu->ibox_callback = NULL;
struct spu *spu;
int node, n;
- spu_context_nospu_trace(spu_find_vitim__enter, ctx);
+ spu_context_nospu_trace(spu_find_victim__enter, ctx);
/*
* Look for a possible preemption candidate on the local node first.
SPU_SCHED_SPU_RUN, /* context is within spu_run */
};
+enum {
+ SWITCH_LOG_BUFSIZE = 4096,
+};
+
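+/*
+ * Switch log event types: START and STOP bracket the time a context
+ * spends loaded on a physical SPU; EXIT carries the stop status when
+ * spu_run returns to user space.
+ */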
+enum {
+ SWITCH_LOG_START,
+ SWITCH_LOG_STOP,
+ SWITCH_LOG_EXIT,
+};
+
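+/*
+ * Per-context switch log: a ring buffer of entries protected by "lock",
+ * with readers sleeping on "wait".  head is the next slot to fill,
+ * tail the next slot to drain.
+ */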
+struct switch_log {
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ unsigned long head;
+ unsigned long tail;
+ struct switch_log_entry {
+ struct timespec tstamp;
+ s32 spu_id;
+ u32 type;
+ u32 val;
+ u64 timebase;
+ } log[];
+};
+
struct spu_context {
struct spu *spu; /* pointer to a physical SPU */
struct spu_state csa; /* SPU context save area. */
unsigned long long libassist;
} stats;
+ /* context switch log */
+ struct switch_log *switch_log;
+
struct list_head aff_list;
int aff_head;
int aff_offset;
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
+void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
+ u32 type, u32 val);
void spu_set_timeslice(struct spu_context *ctx);
void spu_update_sched_info(struct spu_context *ctx);
void __spu_update_sched_info(struct spu_context *ctx);
enum spu_utilization_state new_state);
#define spu_context_trace(name, ctx, spu) \
- trace_mark(name, "%p %p", ctx, spu);
+ trace_mark(name, "ctx %p spu %p", ctx, spu);
#define spu_context_nospu_trace(name, ctx) \
- trace_mark(name, "%p", ctx);
+ trace_mark(name, "ctx %p", ctx);
#endif
}
struct spu_probe spu_probes[] = {
- { "spu_bind_context__enter", "%p %p", spu_context_event },
- { "spu_unbind_context__enter", "%p %p", spu_context_event },
- { "spu_get_idle__enter", "%p", spu_context_nospu_event },
- { "spu_get_idle__found", "%p %p", spu_context_event },
- { "spu_get_idle__not_found", "%p", spu_context_nospu_event },
- { "spu_find_victim__enter", "%p", spu_context_nospu_event },
- { "spusched_tick__preempt", "%p %p", spu_context_event },
- { "spusched_tick__newslice", "%p", spu_context_nospu_event },
- { "spu_yield__enter", "%p", spu_context_nospu_event },
- { "spu_deactivate__enter", "%p", spu_context_nospu_event },
- { "__spu_deactivate__unload", "%p %p", spu_context_event },
- { "spufs_ps_nopfn__enter", "%p", spu_context_nospu_event },
- { "spufs_ps_nopfn__sleep", "%p", spu_context_nospu_event },
- { "spufs_ps_nopfn__wake", "%p %p", spu_context_event },
- { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
- { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
- { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
- { "spufs_stop_callback__enter", "%p %p", spu_context_event },
+ { "spu_bind_context__enter", "ctx %p spu %p", spu_context_event },
+ { "spu_unbind_context__enter", "ctx %p spu %p", spu_context_event },
+ { "spu_get_idle__enter", "ctx %p", spu_context_nospu_event },
+ { "spu_get_idle__found", "ctx %p spu %p", spu_context_event },
+ { "spu_get_idle__not_found", "ctx %p", spu_context_nospu_event },
+ { "spu_find_victim__enter", "ctx %p", spu_context_nospu_event },
+ { "spusched_tick__preempt", "ctx %p spu %p", spu_context_event },
+ { "spusched_tick__newslice", "ctx %p", spu_context_nospu_event },
+ { "spu_yield__enter", "ctx %p", spu_context_nospu_event },
+ { "spu_deactivate__enter", "ctx %p", spu_context_nospu_event },
+ { "__spu_deactivate__unload", "ctx %p spu %p", spu_context_event },
+ { "spufs_ps_nopfn__enter", "ctx %p", spu_context_nospu_event },
+ { "spufs_ps_nopfn__sleep", "ctx %p", spu_context_nospu_event },
+ { "spufs_ps_nopfn__wake", "ctx %p spu %p", spu_context_event },
+ { "spufs_ps_nopfn__insert", "ctx %p spu %p", spu_context_event },
+ { "spu_acquire_saved__enter", "ctx %p", spu_context_nospu_event },
+ { "destroy_spu_context__enter", "ctx %p", spu_context_nospu_event },
+ { "spufs_stop_callback__enter", "ctx %p spu %p", spu_context_event },
};
static int __init sputrace_init(void)