return ia64_get_itc();
}
-/* Given PGD from the address space's page table, return the kernel
- * virtual mapping of the physical memory mapped at ADR.
- */
-static inline unsigned long
-uvirt_to_kva(pgd_t *pgd, unsigned long adr)
-{
- unsigned long ret = 0UL;
- pmd_t *pmd;
- pte_t *ptep, pte;
-
- if (!pgd_none(*pgd)) {
- pmd = pmd_offset(pgd, adr);
- if (!pmd_none(*pmd)) {
- ptep = pte_offset(pmd, adr);
- pte = *ptep;
- if (pte_present(pte)) {
- ret = (unsigned long) page_address(pte_page(pte));
- ret |= (adr & (PAGE_SIZE - 1));
- }
- }
- }
- DBprintk(("[%d] uv2kva(%lx-->%lx)\n", current->pid, adr, ret));
- return ret;
-}
-
/* Here we want the physical address of the memory.
- * This is used when initializing the contents of the
- * area and marking the pages as reserved.
+ * This is used when initializing the contents of the area.
*/
static inline unsigned long
- kvirt_to_pa(unsigned long adr)
+ pfm_kvirt_to_pa(unsigned long adr)
{
__u64 pa = ia64_tpa(adr);
- DBprintk(("kv2pa(%lx-->%lx)\n", adr, pa));
+ //DBprintk(("kv2pa(%lx-->%lx)\n", adr, pa));
return pa;
}
+
static void *
- rvmalloc(unsigned long size)
+ pfm_rvmalloc(unsigned long size)
{
void *mem;
- unsigned long adr, page;
+ unsigned long adr;
+ size=PAGE_ALIGN(size);
mem=vmalloc(size);
if (mem) {
+ //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
memset(mem, 0, size); /* Clear the ram out, no junk to the user */
adr=(unsigned long) mem;
 		while (size > 0) {
-			page = pfm_kvirt_to_pa(adr);
-			mem_map_reserve(virt_to_page(__va(page)));
+			mem_map_reserve(vmalloc_to_page((void *)adr));
 			adr+=PAGE_SIZE;
 			size-=PAGE_SIZE;
 		}
 	}
 	return mem;
 }
static void
- rvfree(void *mem, unsigned long size)
+ pfm_rvfree(void *mem, unsigned long size)
{
- unsigned long adr, page = 0;
+ unsigned long adr;
if (mem) {
adr=(unsigned long) mem;
-		while (size > 0) {
-			page = pfm_kvirt_to_pa(adr);
-			mem_map_unreserve(virt_to_page(__va(page)));
+		while ((long) size > 0) {
+			mem_map_unreserve(vmalloc_to_page((void *)adr));
adr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
*/
if (size > current->rlim[RLIMIT_MEMLOCK].rlim_cur) return -EAGAIN;
- /* find some free area in address space */
- addr = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE);
- if (!addr) goto no_addr;
-
- DBprintk((" entries=%ld aligned size=%ld, unmapped @0x%lx\n", entries, size, addr));
-
- /* allocate vma */
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!vma) goto no_vma;
-
/*
- * initialize the vma for the sampling buffer
+ * We do the easy to undo allocations first.
+ *
+ * pfm_rvmalloc(), clears the buffer, so there is no leak
*/
- vma->vm_mm = mm;
- vma->vm_start = addr;
- vma->vm_end = addr + size;
- vma->vm_flags = VM_READ|VM_MAYREAD;
- vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
- vma->vm_ops = NULL;
- vma->vm_pgoff = 0;
- vma->vm_file = NULL;
- vma->vm_raend = 0;
-
- smpl_buf = rvmalloc(size);
- if (smpl_buf == NULL) goto no_buffer;
-
- DBprintk((" smpl_buf @%p\n", smpl_buf));
+ smpl_buf = pfm_rvmalloc(size);
+ if (smpl_buf == NULL) {
+ DBprintk(("Can't allocate sampling buffer\n"));
+ return -ENOMEM;
+ }
- if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, addr, size)) goto cant_remap;
+ DBprintk(("smpl_buf @%p\n", smpl_buf));
/* allocate sampling buffer descriptor now */
- psb = vmalloc(sizeof(*psb));
- if (psb == NULL) goto no_buffer_desc;
-
- /* start with something clean */
- memset(smpl_buf, 0x0, size);
+ psb = kmalloc(sizeof(*psb), GFP_KERNEL);
+ if (psb == NULL) {
+ DBprintk(("Can't allocate sampling buffer descriptor\n"));
+ pfm_rvfree(smpl_buf, size);
+ return -ENOMEM;
+ }
- vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
- vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
- vma->vm_ops = &pfm_vm_ops; /* necesarry to get the close() callback */
- vma->vm_pgoff = 0;
- vma->vm_file = NULL;
- vma->vm_raend = 0;
- vma->vm_private_data = psb; /* information needed by the pfm_vm_close() function */
-
- /*
- * Now we have everything we need and we can initialize
- * and connect all the data structures
- */
-
+ /* allocate vma */
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma) {
+ DBprintk(("Cannot allocate vma\n"));
+ goto error;
+ }
+ /*
+ * partially initialize the vma for the sampling buffer
+ */
psb->psb_hdr = smpl_buf;
- psb->psb_addr = (char *)smpl_buf+sizeof(perfmon_smpl_hdr_t); /* first entry */
+ psb->psb_addr = ((char *)smpl_buf)+sizeof(perfmon_smpl_hdr_t); /* first entry */
psb->psb_size = size; /* aligned size */
psb->psb_index = 0;
psb->psb_entries = entries;