spin_lock_init(&inode->i_data.private_lock);
INIT_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
INIT_PRIO_TREE_ROOT(&inode->i_data.i_mmap_shared);
+ INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
spin_lock_init(&inode->i_lock);
i_size_ordered_init(inode);
}
struct address_space_operations *a_ops; /* methods */
struct prio_tree_root i_mmap; /* tree of private mappings */
struct prio_tree_root i_mmap_shared; /* tree of shared mappings */
+ struct list_head i_mmap_nonlinear;/*list of nonlinear mappings */
spinlock_t i_mmap_lock; /* protect trees & list above */
atomic_t truncate_count; /* Cover race condition with truncate */
unsigned long flags; /* error bits/gfp mask */
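
The new list head only works because, on the vma side, the nonlinear list reuses the storage that otherwise holds the prio-tree node. Roughly, the relevant part of struct vm_area_struct in kernels of this era looks like the sketch below (field layout recalled from memory, so treat it as an assumption rather than a quotation from this patch):

	/* Sketch of the vma-side storage (assumed layout, not part of this
	 * patch): the vm_set list and the prio_tree_node share a union, so a
	 * vma taken out of i_mmap_shared can be threaded onto
	 * i_mmap_nonlinear through shared.vm_set.list without growing the
	 * structure. */
	union {
		struct {
			struct list_head list;
			void *parent;		/* NULL when not in a prio tree */
			struct vm_area_struct *head;
		} vm_set;
		struct prio_tree_node prio_tree_node;
	} shared;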
static inline int mapping_mapped(struct address_space *mapping)
{
return !prio_tree_empty(&mapping->i_mmap) ||
- !prio_tree_empty(&mapping->i_mmap_shared);
+ !prio_tree_empty(&mapping->i_mmap_shared) ||
+ !list_empty(&mapping->i_mmap_nonlinear);
}
/*
 * Might pages of this file have been modified in userspace?
 */
static inline int mapping_writably_mapped(struct address_space *mapping)
{
- return !prio_tree_empty(&mapping->i_mmap_shared);
+ return !prio_tree_empty(&mapping->i_mmap_shared) ||
+ !list_empty(&mapping->i_mmap_nonlinear);
}
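
Both helpers are consulted by generic code rather than by this patch itself; the usual pattern is a cache-aliasing check on the read side. A minimal sketch of such a caller, with the wrapper function invented purely for illustration:

/* Sketch: read-side use of mapping_writably_mapped().  Only the helper
 * call is the point here; the wrapper function is hypothetical. */
static void read_side_alias_check(struct address_space *mapping,
				  struct page *page)
{
	/*
	 * If userspace may be writing to this page through its own
	 * mappings, resolve potential D-cache aliases before the kernel
	 * reads the page contents.
	 */
	if (mapping_writably_mapped(mapping))
		flush_dcache_page(page);
}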
/*
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 * file within an existing vma.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
+ struct address_space *mapping;
unsigned long end = start + size;
struct vm_area_struct *vma;
int err = -EINVAL;
end <= vma->vm_end) {
/* Must set VM_NONLINEAR before any pages are populated. */
- if (pgoff != linear_page_index(vma, start))
+ if (pgoff != linear_page_index(vma, start) &&
+ !(vma->vm_flags & VM_NONLINEAR)) {
+ mapping = vma->vm_file->f_mapping;
+ spin_lock(&mapping->i_mmap_lock);
vma->vm_flags |= VM_NONLINEAR;
+ vma_prio_tree_remove(vma, &mapping->i_mmap_shared);
+ vma_prio_tree_init(vma);
+ list_add_tail(&vma->shared.vm_set.list,
+ &mapping->i_mmap_nonlinear);
+ spin_unlock(&mapping->i_mmap_lock);
+ }
/* ->populate can take a long time, so downgrade the lock. */
downgrade_write(&mm->mmap_sem);
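
For orientation, this is the path reached from the remap_file_pages() system call: the first call that installs a page at a non-linear offset is what flips the vma to VM_NONLINEAR and moves it from the i_mmap_shared tree onto the new list. A userspace sketch of that trigger (the file descriptor, sizes and offsets are purely illustrative):

/* Userspace sketch: one linear MAP_SHARED window, then a non-linear
 * rearrangement inside it.  The remap_file_pages() call below is what
 * drives the VM_NONLINEAR transition in the hunk above. */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

static void *make_nonlinear_window(int fd)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *win = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);

	if (win == MAP_FAILED)
		return NULL;
	/* Show file page 3 at the start of the window; prot must be 0. */
	remap_file_pages(win, psz, 0, 3, 0);
	return win;
}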
while ((vma = vma_prio_tree_next(vma, root, &iter,
details->first_index, details->last_index)) != NULL) {
- if (unlikely(vma->vm_flags & VM_NONLINEAR))
- continue;
vba = vma->vm_pgoff;
vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
}
}
-static void unmap_nonlinear_range_list(struct prio_tree_root *root,
- struct zap_details *details)
-{
- struct vm_area_struct *vma = NULL;
- struct prio_tree_iter iter;
-
- while ((vma = vma_prio_tree_next(vma, root, &iter,
- 0, ULONG_MAX)) != NULL) {
- if (!(vma->vm_flags & VM_NONLINEAR))
- continue;
- details->nonlinear_vma = vma;
- zap_page_range(vma, vma->vm_start,
- vma->vm_end - vma->vm_start, details);
- }
-}
-
/**
* unmap_mapping_range - unmap the portion of all mmaps
* in the specified address_space corresponding to the specified
* page range in the underlying file.
/* Don't waste time checking the mapping on fully shared vmas */
details.check_mapping = NULL;
- if (unlikely(!prio_tree_empty(&mapping->i_mmap_shared))) {
+ if (unlikely(!prio_tree_empty(&mapping->i_mmap_shared)))
unmap_mapping_range_list(&mapping->i_mmap_shared, &details);
- unmap_nonlinear_range_list(&mapping->i_mmap_shared, &details);
- }
+ if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) {
+ struct vm_area_struct *vma;
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
+ shared.vm_set.list) {
+ details.nonlinear_vma = vma;
+ zap_page_range(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start, &details);
+ }
+ }
spin_unlock(&mapping->i_mmap_lock);
}
EXPORT_SYMBOL(unmap_mapping_range);
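
The caller this loop exists for is truncation and hole punching: when part of a file disappears, every user mapping of it, linear or not, must be unmapped, so the nonlinear list has to be walked as well as both trees. A sketch modeled on the vmtruncate() pattern of this era, with the wrapper name and exact arguments treated as illustrative:

/* Sketch of a truncate-style caller of unmap_mapping_range().  The wrapper
 * is hypothetical; the two calls mirror what vmtruncate() does. */
static void zap_user_mappings_beyond(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;

	/* Drop every user mapping of the file from 'offset' on; with this
	 * patch that covers i_mmap, i_mmap_shared and i_mmap_nonlinear. */
	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, offset);
}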
if (vma->vm_flags & VM_DENYWRITE)
atomic_inc(&file->f_dentry->d_inode->i_writecount);
- if (vma->vm_flags & VM_SHARED)
+ if (unlikely(vma->vm_flags & VM_NONLINEAR))
+ list_del_init(&vma->shared.vm_set.list);
+ else if (vma->vm_flags & VM_SHARED)
vma_prio_tree_remove(vma, &mapping->i_mmap_shared);
else
vma_prio_tree_remove(vma, &mapping->i_mmap);
if (vma->vm_flags & VM_DENYWRITE)
atomic_dec(&file->f_dentry->d_inode->i_writecount);
- if (vma->vm_flags & VM_SHARED)
+ if (unlikely(vma->vm_flags & VM_NONLINEAR))
+ list_add_tail(&vma->shared.vm_set.list,
+ &mapping->i_mmap_nonlinear);
+ else if (vma->vm_flags & VM_SHARED)
vma_prio_tree_insert(vma, &mapping->i_mmap_shared);
else
vma_prio_tree_insert(vma, &mapping->i_mmap);
if (file) {
mapping = file->f_mapping;
- if (vma->vm_flags & VM_SHARED)
- root = &mapping->i_mmap_shared;
- else
+ if (!(vma->vm_flags & VM_SHARED))
root = &mapping->i_mmap;
+ else if (!(vma->vm_flags & VM_NONLINEAR))
+ root = &mapping->i_mmap_shared;
spin_lock(&mapping->i_mmap_lock);
}
spin_lock(&mm->page_table_lock);
/*
* Add a new vma known to map the same set of pages as the old vma:
* useful for fork's dup_mmap as well as vma_prio_tree_insert below.
+ * Note that it just happens to work correctly on i_mmap_nonlinear too.
*/
void vma_prio_tree_add(struct vm_area_struct *vma, struct vm_area_struct *old)
{
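
Why the added comment can claim this also works for i_mmap_nonlinear: a vma on the nonlinear list has been through vma_prio_tree_init(), so its shared.vm_set.parent is NULL, and vma_prio_tree_add() then degenerates to linking the new vma straight after the old one, i.e. onto the same nonlinear list. A sketch of that degenerate case, reconstructed from memory of the prio-tree code rather than quoted from it:

/* Sketch (assumed, not quoted): the path vma_prio_tree_add() takes when
 * 'old' is a nonlinear vma.  With old->shared.vm_set.parent == NULL, the
 * copy is simply linked next to 'old' on mapping->i_mmap_nonlinear. */
static void add_alongside_nonlinear(struct vm_area_struct *vma,
				    struct vm_area_struct *old)
{
	vma->shared.vm_set.head = NULL;
	vma->shared.vm_set.parent = NULL;
	list_add(&vma->shared.vm_set.list, &old->shared.vm_set.list);
}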
while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
&iter, pgoff, pgoff)) != NULL) {
- if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
- failed++;
- continue;
- }
if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) {
referenced++;
goto out;
}
}
- /* Hmm, but what of the nonlinears which pgoff,pgoff skipped? */
- WARN_ON(!failed);
+ if (list_empty(&mapping->i_mmap_nonlinear))
+ WARN_ON(!failed);
out:
spin_unlock(&mapping->i_mmap_lock);
return referenced;
while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
&iter, pgoff, pgoff)) != NULL) {
- if (unlikely(vma->vm_flags & VM_NONLINEAR))
- continue;
if (vma->vm_mm->rss) {
address = vma_address(vma, pgoff);
ret = try_to_unmap_one(page,
}
}
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
- &iter, 0, ULONG_MAX)) != NULL) {
- if (VM_NONLINEAR != (vma->vm_flags &
- (VM_NONLINEAR|VM_LOCKED|VM_RESERVED)))
+ if (list_empty(&mapping->i_mmap_nonlinear))
+ goto out;
+
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
+ shared.vm_set.list) {
+ if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
max_nl_cursor = CLUSTER_SIZE;
do {
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
- &iter, 0, ULONG_MAX)) != NULL) {
- if (VM_NONLINEAR != (vma->vm_flags &
- (VM_NONLINEAR|VM_LOCKED|VM_RESERVED)))
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
+ shared.vm_set.list) {
+ if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
continue;
cursor = (unsigned long) vma->vm_private_data;
while (vma->vm_mm->rss &&
* in locked vmas). Reset cursor on all unreserved nonlinear
* vmas, now forgetting on which ones it had fallen behind.
*/
- vma = NULL; /* it is already, but above loop might change */
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
- &iter, 0, ULONG_MAX)) != NULL) {
- if ((vma->vm_flags & (VM_NONLINEAR|VM_RESERVED)) ==
- VM_NONLINEAR)
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
+ shared.vm_set.list) {
+ if (!(vma->vm_flags & VM_RESERVED))
vma->vm_private_data = 0;
}
relock: