#ifdef CONFIG_PERFMON
/*
- * For PMU which rely on the debug registers for some features, you must
- * you must enable the following flag to activate the support for
+ * For PMUs which rely on the debug registers for some features, you
+ * must enable the following flag to activate the support for
* accessing the registers via the perfmonctl() interface.
*/
#ifdef CONFIG_ITANIUM
static unsigned long pfm_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
static unsigned long pfm_recorded_samples_count;
+
+static unsigned long reset_pmcs[IA64_NUM_PMC_REGS]; /* PAL reset values for the PMCs, captured by pfm_pmu_snapshot() */
+
static void pfm_vm_close(struct vm_area_struct * area);
static struct vm_operations_struct pfm_vm_ops={
close: pfm_vm_close
* forward declarations
*/
static void ia64_reset_pmu(struct task_struct *);
+#ifdef CONFIG_SMP
static void pfm_fetch_regs(int cpu, struct task_struct *task, pfm_context_t *ctx);
+#endif
static void pfm_lazy_save_regs (struct task_struct *ta);
static inline unsigned long
if (mem) {
adr=(unsigned long) mem;
- while ((long) size > 0)
+ while ((long) size > 0) {
mem_map_unreserve(vmalloc_to_page((void*)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
*/
if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
-
+#if 0
if (ctx->ctx_fl_frozen==0) {
printk("task %d without pmu_frozen set\n", task->pid);
return -EINVAL;
}
+#endif
if (task == current) {
DBprintk(("restarting self %d frozen=%d \n", current->pid, ctx->ctx_fl_frozen));
return 0;
}
+#ifndef CONFIG_SMP
+/*
+ * On UP kernels, we do not need to constantly set the psr.pp bit
+ * when a task is scheduled. The psr.pp bit can only be changed in
+ * the kernel because of a user request. Given that we are on a UP, non-preemptive
+ * kernel, we know that no other task is running, so we can simply update their
+ * psr.pp from their saved state. There is thus no impact on the context switch
+ * code compared to the SMP case.
+ */
+static void
+pfm_tasklist_toggle_pp(unsigned int val)
+{
+ struct task_struct *p;
+ struct pt_regs *regs;
+
+ DBprintk(("invoked by [%d] pp=%u\n", current->pid, val));
+
+ read_lock(&tasklist_lock);
+
+ for_each_task(p) {
+ regs = (struct pt_regs *)((unsigned long) p + IA64_STK_OFFSET);
+
+ /*
+ * position on pt_regs saved on stack on 1st entry into the kernel
+ */
+ regs--;
+
+ /*
+ * update psr.pp
+ */
+ ia64_psr(regs)->pp = val;
+ }
+ read_unlock(&tasklist_lock);
+}
+#endif
+
+
+
static int
-pfm_destroy_context(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
+pfm_stop(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
struct pt_regs *regs)
{
/* we don't quite support this right now */
if (task != current) return -EINVAL;
- if (ctx->ctx_fl_system) {
- ia64_psr(regs)->pp = 0;
+ /*
+ * Cannot do anything before PMU is enabled
+ */
+ if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
+
+ DBprintk(("[%d] fl_system=%d owner=%p current=%p\n",
+ current->pid,
+ ctx->ctx_fl_system, PMU_OWNER(),
+ current));
+ /* simply stop monitoring but not the PMU */
+ if (ctx->ctx_fl_system) {
+
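+	/* system-wide session: stop monitoring on this CPU by clearing psr.pp */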
__asm__ __volatile__ ("rsm psr.pp;;"::: "memory");
+
+ /* disable dcr pp */
+ ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+
+#ifdef CONFIG_SMP
+ local_cpu_data->pfm_dcr_pp = 0;
+#else
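+	/* UP: propagate psr.pp=0 into every task's saved state */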
+ pfm_tasklist_toggle_pp(0);
+#endif
+
+ ia64_psr(regs)->pp = 0;
+
} else {
- ia64_psr(regs)->up = 0;
__asm__ __volatile__ ("rum psr.up;;"::: "memory");
- task->thread.flags &= ~IA64_THREAD_PM_VALID;
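+	/* note: IA64_THREAD_PM_VALID stays set; stopping does not invalidate the saved PMU state */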
+ ia64_psr(regs)->up = 0;
}
+ return 0;
+}
- SET_PMU_OWNER(NULL);
+static int
+pfm_disable(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
+ struct pt_regs *regs)
+{
+ /* we don't quite support this right now */
+ if (task != current) return -EINVAL;
- /* freeze PMU */
- ia64_set_pmc(0, 1);
- ia64_srlz_d();
+ if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
+
+ /*
+ * stop monitoring, freeze PMU, and save state in context
+ * this call will clear IA64_THREAD_PM_VALID for per-task sessions.
+ */
+ pfm_flush_regs(task);
+
+ if (ctx->ctx_fl_system) {
+ ia64_psr(regs)->pp = 0;
+ } else {
+ ia64_psr(regs)->up = 0;
+ }
+ /*
+ * goes back to default behavior
+	 * no need to change the live psr.sp because it is useless at the kernel level
+ */
+ ia64_psr(regs)->sp = 1;
+
+ DBprintk(("enabling psr.sp for [%d]\n", current->pid));
+
+ ctx->ctx_flags.state = PFM_CTX_DISABLED;
+
+ return 0;
+}
+
+
+
+static int
+pfm_destroy_context(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
+ struct pt_regs *regs)
+{
+ /* we don't quite support this right now */
+ if (task != current) return -EINVAL;
+
+ /*
+ * if context was never enabled, then there is not much
+ * to do
+ */
+ if (!CTX_IS_ENABLED(ctx)) goto skipped_stop;
+
+ /*
+	 * Disable context: stop monitoring, flush regs to software state (not strictly
+	 * needed here since the context is being destroyed), and freeze the PMU
+ *
+ * The IA64_THREAD_PM_VALID is cleared by pfm_flush_regs() called from pfm_disable()
+ */
+ pfm_disable(task, ctx, arg, count, regs);
+
+ if (ctx->ctx_fl_system) {
+ ia64_psr(regs)->pp = 0;
+ } else {
+ ia64_psr(regs)->up = 0;
+ }
/* restore security level */
ia64_psr(regs)->sp = 1;
+skipped_stop:
/*
* remove sampling buffer mapping, if any
*/
/* enable dcr pp */
ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP);
+#ifdef CONFIG_SMP
local_cpu_data->pfm_dcr_pp = 1;
+#else
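+	/* UP: set psr.pp in the saved state of every task */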
+ pfm_tasklist_toggle_pp(1);
+#endif
ia64_psr(regs)->pp = 1;
+
__asm__ __volatile__ ("ssm psr.pp;;"::: "memory");
} else {
return 0;
}
-static int
-pfm_disable(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
-
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
-
- /*
- * stop monitoring, freeze PMU, and save state in context
- */
- pfm_flush_regs(task);
-
- /*
- * just to make sure nothing starts again when back in user mode.
- * pfm_flush_regs() freezes the PMU anyway.
- */
- if (ctx->ctx_fl_system) {
- ia64_psr(regs)->pp = 0;
- } else {
- ia64_psr(regs)->up = 0;
- }
-
- /*
- * goes back to default behavior
- * no need to change live psr.sp because useless at the kernel level
- */
- ia64_psr(regs)->sp = 1;
-
- DBprintk(("enabling psr.sp for [%d]\n", current->pid));
-
- ctx->ctx_flags.state = PFM_CTX_DISABLED;
-
- return 0;
-}
-
-static int
-pfm_stop(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
-
- /*
- * Cannot do anything before PMU is enabled
- */
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
-
- DBprintk(("[%d] fl_system=%d owner=%p current=%p\n",
- current->pid,
- ctx->ctx_fl_system, PMU_OWNER(),
- current));
- /* simply stop monitoring but not the PMU */
- if (ctx->ctx_fl_system) {
-
- __asm__ __volatile__ ("rsm psr.pp;;"::: "memory");
-
- /* disable dcr pp */
- ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
-
- local_cpu_data->pfm_dcr_pp = 0;
-
- ia64_psr(regs)->pp = 0;
-
- __asm__ __volatile__ ("rsm psr.pp;;"::: "memory");
-
- } else {
- ia64_psr(regs)->up = 0;
- __asm__ __volatile__ ("rum psr.up;;"::: "memory");
- }
- return 0;
-}
-
/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
*/
p += sprintf(p, "recorded samples=%lu\n", pfm_recorded_samples_count);
p += sprintf(p, "CPU%d.pmc[0]=%lx\nPerfmon debug: %s\n",
- smp_processor_id(), pmc0, pfm_debug ? "On" : "Off");
+ smp_processor_id(), pmc0, pfm_debug_mode ? "On" : "Off");
+#ifdef CONFIG_SMP
p += sprintf(p, "CPU%d cpu_data.pfm_syst_wide=%d cpu_data.dcr_pp=%d\n",
smp_processor_id(), local_cpu_data->pfm_syst_wide, local_cpu_data->pfm_dcr_pp);
+#endif
LOCK_PFS();
p += sprintf(p, "proc_sessions=%lu\nsys_sessions=%lu\nsys_use_dbregs=%lu\nptrace_use_dbregs=%lu\n",
return len;
}
+#ifdef CONFIG_SMP
void
pfm_syst_wide_update_task(struct task_struct *task, int mode)
{
*/
ia64_psr(regs)->pp = mode ? local_cpu_data->pfm_dcr_pp : 0;
}
+#endif
+
void
pfm_save_regs (struct task_struct *task)
/* not owned by this CPU */
atomic_set(&ctx->ctx_last_cpu, -1);
+#ifdef CONFIG_SMP
do_nothing:
+#endif
/*
* declare we are done saving this context
*
struct task_struct *owner;
unsigned long mask;
u64 psr;
- int i, cpu;
+ int i;
+#ifdef CONFIG_SMP
+ int cpu;
+#endif
owner = PMU_OWNER();
ctx = task->thread.pfm_context;
}
-static void
-pfm_model_specific_reset_pmu(struct task_struct *task)
-{
- int i;
-
-#ifdef CONFIG_ITANIUM
- /* opcode matcher set to all 1s */
- ia64_set_pmc(8,~0UL);
- ia64_set_pmc(9,~0UL);
-
- /* I-EAR config cleared, plm=0 */
- ia64_set_pmc(10,0UL);
-
- /* D-EAR config cleared, PMC[11].pt must be 1 */
- ia64_set_pmc(11,1UL << 28);
-
- /* BTB config. plm=0 */
- ia64_set_pmc(12,0UL);
-
- /* Instruction address range, PMC[13].ta must be 1 */
- ia64_set_pmc(13,1UL);
-
- /*
- * Clear all PMDs
- *
- * XXX: may be good enough to rely on the impl_regs to generalize
- * this.
- */
- for(i = 0; i< 18 ; i++) {
- ia64_set_pmd(i,0UL);
- }
-#endif
-}
-
/*
- * XXX: this routine is not very portable for PMCs
* XXX: make this routine able to work with non current context
*/
static void
ia64_reset_pmu(struct task_struct *task)
{
- pfm_context_t *ctx = task->thread.pfm_context;
struct thread_struct *t = &task->thread;
+ pfm_context_t *ctx = t->pfm_context;
unsigned long mask;
int i;
return;
}
- /* PMU is frozen, no pending overflow bits */
+ /* Let's make sure the PMU is frozen */
ia64_set_pmc(0,1);
/*
- * Let's first do the architected initializations
+ * install reset values for the PMCs; pmu_conf.impl_regs[0] is a bitmask of
+ * implemented PMCs. We skip PMC0 (done above).
+ * XXX: good up to 64 PMCs
*/
-
- /* clear counters */
- ia64_set_pmd(4,0UL);
- ia64_set_pmd(5,0UL);
- ia64_set_pmd(6,0UL);
- ia64_set_pmd(7,0UL);
-
- /* clear overflow status bits */
- ia64_set_pmc(1,0UL);
- ia64_set_pmc(2,0UL);
- ia64_set_pmc(3,0UL);
-
- /* clear counting monitor configuration */
- ia64_set_pmc(4,0UL);
- ia64_set_pmc(5,0UL);
- ia64_set_pmc(6,0UL);
- ia64_set_pmc(7,0UL);
-
+ mask = pmu_conf.impl_regs[0] >> 1;
+ for(i=1; mask; mask>>=1, i++) {
+ if (mask & 0x1) {
+ ia64_set_pmc(i, reset_pmcs[i]);
+ /*
+ * When restoring a context, we must restore ALL PMCs, even the ones
+ * that the task does not use, to avoid leaks and possible corruption
+ * of the session because of configuration conflicts. So here, we
+ * initialize the table used by the context switch restore routine.
+ */
+ t->pmc[i] = reset_pmcs[i];
+ DBprintk((" pmc[%d]=0x%lx\n", i, reset_pmcs[i]));
+
+ }
+ }
/*
- * Now let's do the CPU model specific initializations
+ * clear reset values for the PMDs; pmu_conf.impl_regs[4] is a bitmask of
+ * implemented PMDs.
+ * XXX: good up to 64 PMDs. Assumes that zero is a safe value for every PMD.
*/
- pfm_model_specific_reset_pmu(task);
+ mask = pmu_conf.impl_regs[4];
+ for(i=0; mask; mask>>=1, i++) {
+ if (mask & 0x1) ia64_set_pmd(i, 0UL);
+ }
/*
 * On context switch restore, we must restore ALL pmc even
*/
ctx->ctx_reload_pmcs[0] = pmu_conf.impl_regs[0];
- /*
- * make sure we pick up whatever values were installed
- * for the CPU model specific reset. We also include
- * the architected PMC (pmc4-pmc7)
- *
- * This step is required in order to restore the correct values in PMC when
- * the task is switched out and back in just after the PFM_ENABLE.
- */
- mask = pmu_conf.impl_regs[0];
- for (i=0; mask; i++, mask>>=1) {
- if (mask & 0x1) t->pmc[i] = ia64_get_pmc(i);
- }
-
/*
* useful in case of re-enable after disable
*/
* By now, we could still have an overflow interrupt in-flight.
*/
if (ctx->ctx_fl_system) {
+
+ __asm__ __volatile__ ("rsm psr.pp;;"::: "memory");
+
/* disable dcr pp */
ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+#ifdef CONFIG_SMP
local_cpu_data->pfm_syst_wide = 0;
local_cpu_data->pfm_dcr_pp = 0;
-
-
- __asm__ __volatile__ ("rsm psr.pp;;"::: "memory");
+#else
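+	/* UP: clear psr.pp in the saved state of every task */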
+ pfm_tasklist_toggle_pp(0);
+#endif
} else {
}
+
/*
* task is the newly created task, pt_regs for new child
*/
};
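+/*
+ * Take a snapshot of the PMC values installed by PAL at boot. This
+ * becomes the master copy that ia64_reset_pmu() reuses to bring the
+ * PMU back to a known-safe configuration on PFM_ENABLE.
+ */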
+static void
+pfm_pmu_snapshot(void)
+{
+ int i;
+
+ for (i=0; i < IA64_NUM_PMC_REGS; i++) {
+ if (i >= pmu_conf.num_pmcs) break;
+ if (PMC_IS_IMPL(i)) reset_pmcs[i] = ia64_get_pmc(i);
+ }
+}
+
/*
* perfmon initialization routine, called from the initcall() table
*/
pmu_conf.num_pmcs = find_num_pm_regs(pmu_conf.impl_regs);
pmu_conf.num_pmds = find_num_pm_regs(&pmu_conf.impl_regs[4]);
- printk("perfmon: %u bits counters (max value 0x%016lx)\n",
- pm_info.pal_perf_mon_info_s.width, pmu_conf.perf_ovfl_val);
+ printk("perfmon: %u bits counters\n", pm_info.pal_perf_mon_info_s.width);
printk("perfmon: %lu PMC/PMD pairs, %lu PMCs, %lu PMDs\n",
pmu_conf.max_counters, pmu_conf.num_pmcs, pmu_conf.num_pmds);
pmu_conf.num_ibrs <<=1;
pmu_conf.num_dbrs <<=1;
+ /*
+ * take a snapshot of all PMU registers. PAL is supposed
+ * to configure them with stable/safe values, i.e., not
+ * capturing anything.
+ * We take a snapshot now, before we make any modifications. This
+ * will become our master copy. Then we will reuse the snapshot
+ * to reset the PMU in pfm_enable(). Using this technique, perfmon
+ * does NOT have to know about the specific values to program for
+	 * the PMC/PMD. The safe values may differ from one CPU model to
+	 * another.
+ */
+ pfm_pmu_snapshot();
+
/*
* list the pmc registers used to control monitors
* XXX: unfortunately this information is not provided by PAL