* and invalidate any user data.
*/
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ flush_dcache_mmap_lock(mapping);
while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
 * If this VMA is not in our MM, we can ignore it.
 */
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
flush_cache_page(mpnt, mpnt->vm_start + offset);
}
+ flush_dcache_mmap_unlock(mapping);
}
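
Every traversal of mapping->i_mmap like the one above must now run under
flush_dcache_mmap_lock(), since vma_adjust(), fork() and remap_file_pages()
can reshape the prio tree concurrently. A minimal userspace sketch of that
discipline, with a pthread mutex standing in for tree_lock and a plain list
standing in for the prio tree (both are stand-ins, not kernel code):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct vma { unsigned long start; struct vma *next; };

	static struct vma *i_mmap;		/* shared "tree" */
	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

	static void flush_walk(void)		/* ~ __flush_dcache_page() */
	{
		pthread_mutex_lock(&tree_lock);	/* flush_dcache_mmap_lock() */
		for (struct vma *v = i_mmap; v; v = v->next)
			printf("flush alias at %#lx\n", v->start);
		pthread_mutex_unlock(&tree_lock); /* flush_dcache_mmap_unlock() */
	}

	static void link_vma(unsigned long start)	/* ~ __vma_link_file() */
	{
		struct vma *v = malloc(sizeof(*v));
		v->start = start;
		pthread_mutex_lock(&tree_lock);
		v->next = i_mmap;		/* safe: walker cannot be mid-list */
		i_mmap = v;
		pthread_mutex_unlock(&tree_lock);
	}

	int main(void)
	{
		link_vma(0x8000);
		link_vma(0x40000000);
		flush_walk();
		return 0;
	}
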
static void
* space, then we need to handle them specially to maintain
* cache coherency.
*/
+ flush_dcache_mmap_lock(mapping);
while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
/*
 * If this VMA is not in our MM, we can ignore it.
 * Note that we intentionally mask out the VMA
 * that we are fixing up.
 */
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
}
+ flush_dcache_mmap_unlock(mapping);
if (aliases)
adjust_pte(vma, addr);
else
flush_cache_page(vma, addr);
}
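
The aliases count drives the decision above: if any other live user mapping
of the page was found during the locked walk, the faulting address must be
fixed up as well; otherwise flushing it alone restores coherency. A runnable
toy of just that decision (all names here are hypothetical stand-ins, not
kernel interfaces):

	#include <stdio.h>

	struct mapping { int live_pte[4]; int nr; };

	/* the flush_dcache_mmap_lock()ed walk, minus the locking */
	static int other_aliases(const struct mapping *m, int self)
	{
		int i, aliases = 0;
		for (i = 0; i < m->nr; i++)
			if (i != self && m->live_pte[i])
				aliases++;
		return aliases;
	}

	int main(void)
	{
		struct mapping m = { { 1, 0, 1, 0 }, 3 };
		if (other_aliases(&m, 0))
			puts("alias live: adjust_pte() the faulting address too");
		else
			puts("sole user: flush_cache_page() alone is coherent");
		return 0;
	}
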
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
* to flush one address here for them all to become coherent */
+ flush_dcache_mmap_lock(mapping);
while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
&iter, pgoff, pgoff)) != NULL) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
__flush_cache_page(mpnt, addr);
- return;
+ break;
}
+ flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(__flush_dcache_page);
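
The one behavioural change in this hunk is `return` becoming `break`: now
that the walk is bracketed by lock/unlock, an early return after flushing the
single congruent address would leave tree_lock held with interrupts off. A
small demo of the hazard, with a pthread mutex as the stand-in lock:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void flush_first_alias(const int *alias, int n)
	{
		int i;
		pthread_mutex_lock(&lock);
		for (i = 0; i < n; i++) {
			if (alias[i]) {
				printf("flushed alias %d\n", i);
				break;		/* "return" here would leak the lock */
			}
		}
		pthread_mutex_unlock(&lock);	/* always reached */
	}

	int main(void)
	{
		int aliases[] = { 0, 1, 1 };
		flush_first_alias(aliases, 3);
		flush_first_alias(aliases, 3);	/* would deadlock after a leaked lock */
		return 0;
	}
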
__flush_dcache_page(page);
}
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
+
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
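
Architectures whose caches are physically indexed cannot alias, so they get
the no-op definitions above and the common code can call the hooks
unconditionally at zero cost. A compile-time sketch of that convention;
ALIASING_CACHE and real_lock()/real_unlock() are hypothetical stand-ins for
the per-arch choice:

	#include <stdio.h>

	#ifdef ALIASING_CACHE
	# define flush_dcache_mmap_lock(m)	real_lock(m)
	# define flush_dcache_mmap_unlock(m)	real_unlock(m)
	#else
	# define flush_dcache_mmap_lock(m)	do { } while (0)	/* compiles away */
	# define flush_dcache_mmap_unlock(m)	do { } while (0)
	#endif

	struct address_space { int dummy; };

	int main(void)
	{
		struct address_space mapping = { 0 };
		(void)mapping;				/* unused when the hooks are no-ops */
		flush_dcache_mmap_lock(&mapping);	/* no code emitted here ...         */
		puts("walk i_mmap safely");
		flush_dcache_mmap_unlock(&mapping);	/* ... or here                      */
		return 0;
	}
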
}
}
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
+
#define flush_icache_page(vma,page) do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)
#define flush_icache_range(s,e) do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
/* insert tmp into the share list, just after mpnt */
spin_lock(&file->f_mapping->i_mmap_lock);
+ flush_dcache_mmap_lock(file->f_mapping);
vma_prio_tree_add(tmp, mpnt);
+ flush_dcache_mmap_unlock(file->f_mapping);
spin_unlock(&file->f_mapping->i_mmap_lock);
}
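
fork()'s insertion into the share list shows why a second, inner lock is
needed at all: mappers already serialize against each other on i_mmap_lock,
but the dcache flusher takes only the page tree lock, so every i_mmap update
must hold that too. A sketch of the nesting, with mutexes as userspace
stand-ins:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t i_mmap_lock = PTHREAD_MUTEX_INITIALIZER; /* mappers only      */
	static pthread_mutex_t tree_lock   = PTHREAD_MUTEX_INITIALIZER; /* mappers + flusher */
	static int tree_nodes;

	static void dup_mmap_share(void)	/* ~ the fork() path above */
	{
		pthread_mutex_lock(&i_mmap_lock);	/* excludes other mappers  */
		pthread_mutex_lock(&tree_lock);		/* excludes the flush walk */
		tree_nodes++;				/* vma_prio_tree_add()     */
		pthread_mutex_unlock(&tree_lock);
		pthread_mutex_unlock(&i_mmap_lock);
	}

	static void flusher(void)		/* ~ __flush_dcache_page() */
	{
		pthread_mutex_lock(&tree_lock);		/* i_mmap_lock NOT taken */
		printf("walking %d node(s)\n", tree_nodes);
		pthread_mutex_unlock(&tree_lock);
	}

	int main(void)
	{
		dup_mmap_share();
		flusher();
		return 0;
	}
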
* ->i_mmap_lock (truncate->unmap_mapping_range)
*
* ->mmap_sem
- * ->i_mmap_lock (various places)
+ * ->i_mmap_lock
+ * ->page_table_lock (various places, mainly in mmap.c)
+ * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
*
* ->mmap_sem
* ->lock_page (access_process_vm)
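
The updated comment pins the new rank: i_mmap_lock outermost, then
page_table_lock, with mapping->tree_lock innermost; every path must acquire
in that order or risk an ABBA deadlock against the flush side. A toy of the
rank-ordered acquisition (mutexes again as userspace stand-ins):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t i_mmap_lock     = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t tree_lock       = PTHREAD_MUTEX_INITIALIZER;

	static void locked_path(void)
	{
		pthread_mutex_lock(&i_mmap_lock);	/* outermost, always first        */
		pthread_mutex_lock(&page_table_lock);
		pthread_mutex_lock(&tree_lock);		/* innermost: dcache flush walk   */
		puts("tree walk safe here");
		pthread_mutex_unlock(&tree_lock);
		pthread_mutex_unlock(&page_table_lock);
		pthread_mutex_unlock(&i_mmap_lock);
	}

	int main(void)
	{
		locked_path();
		return 0;
	}

Any thread taking these in a different order could hold tree_lock while
waiting for i_mmap_lock, deadlocking against this path.
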
!(vma->vm_flags & VM_NONLINEAR)) {
mapping = vma->vm_file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
+ flush_dcache_mmap_lock(mapping);
vma->vm_flags |= VM_NONLINEAR;
vma_prio_tree_remove(vma, &mapping->i_mmap);
vma_prio_tree_init(vma);
list_add_tail(&vma->shared.vm_set.list,
&mapping->i_mmap_nonlinear);
+ flush_dcache_mmap_unlock(mapping);
spin_unlock(&mapping->i_mmap_lock);
}
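
remap_file_pages() moves a vma out of the prio tree and onto the nonlinear
list; doing both halves inside one flush_dcache_mmap_lock() section means a
concurrent flusher sees the vma in exactly one container, never neither. A
toy of that atomic move (hypothetical types, a pointer standing in for each
container):

	#include <pthread.h>
	#include <stdio.h>

	struct node { int id; struct node *next; };
	static struct node vma_node = { 1, 0 };
	static struct node *i_mmap = &vma_node;		/* starts in the "tree" */
	static struct node *i_mmap_nonlinear;
	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

	static void make_nonlinear(struct node *v)
	{
		pthread_mutex_lock(&tree_lock);		/* flush_dcache_mmap_lock()   */
		i_mmap = NULL;				/* vma_prio_tree_remove()     */
		v->next = i_mmap_nonlinear;		/* list_add_tail()            */
		i_mmap_nonlinear = v;
		pthread_mutex_unlock(&tree_lock);	/* flush_dcache_mmap_unlock() */
	}

	int main(void)
	{
		make_nonlinear(&vma_node);
		printf("tree empty: %d, nonlinear head id: %d\n",
		       i_mmap == NULL, i_mmap_nonlinear->id);
		return 0;
	}
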
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
#include <asm/tlb.h>
/*
if (vma->vm_flags & VM_SHARED)
mapping->i_mmap_writable--;
+ flush_dcache_mmap_lock(mapping);
if (unlikely(vma->vm_flags & VM_NONLINEAR))
list_del_init(&vma->shared.vm_set.list);
else
vma_prio_tree_remove(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
}
/*
if (vma->vm_flags & VM_SHARED)
mapping->i_mmap_writable++;
+ flush_dcache_mmap_lock(mapping);
if (unlikely(vma->vm_flags & VM_NONLINEAR))
list_add_tail(&vma->shared.vm_set.list,
&mapping->i_mmap_nonlinear);
else
vma_prio_tree_insert(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
}
}
}
spin_lock(&mm->page_table_lock);
- if (root)
+ if (root) {
+ flush_dcache_mmap_lock(mapping);
vma_prio_tree_remove(vma, root);
+ }
vma->vm_start = start;
vma->vm_end = end;
vma->vm_pgoff = pgoff;
if (root) {
vma_prio_tree_init(vma);
vma_prio_tree_insert(vma, root);
+ flush_dcache_mmap_unlock(mapping);
}
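
vma_adjust() holds the lock across the remove, the start/end/pgoff update,
and the reinsert, so a flush walking the tree can never observe the vma
missing or keyed by half-updated values. A sketch of that bracket (field
names are illustrative, not the kernel's):

	#include <pthread.h>
	#include <stdio.h>

	struct vma { unsigned long start, end; int linked; };
	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

	static void adjust(struct vma *v, unsigned long start, unsigned long end)
	{
		pthread_mutex_lock(&tree_lock);		/* flush_dcache_mmap_lock()   */
		v->linked = 0;				/* vma_prio_tree_remove()     */
		v->start = start;			/* keys change while hidden   */
		v->end = end;
		v->linked = 1;				/* vma_prio_tree_insert()     */
		pthread_mutex_unlock(&tree_lock);	/* flush_dcache_mmap_unlock() */
	}

	int main(void)
	{
		struct vma v = { 0x1000, 0x2000, 1 };
		adjust(&v, 0x1000, 0x3000);
		printf("vma [%#lx,%#lx) linked=%d\n", v.start, v.end, v.linked);
		return 0;
	}
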
if (next) {