rawdevice->as.nrpages = 0;
rawdevice->as.a_ops = &blkmtd_aops;
rawdevice->as.host = inode;
- rawdevice->as.i_mmap = NULL;
- rawdevice->as.i_mmap_shared = NULL;
+ INIT_LIST_HEAD(&rawdevice->as.i_mmap);
+ INIT_LIST_HEAD(&rawdevice->as.i_mmap_shared);
spin_lock_init(&rawdevice->as.i_shared_lock);
rawdevice->as.gfp_mask = GFP_KERNEL;
rawdevice->file = file;
INIT_LIST_HEAD(&inode->i_devices);
sema_init(&inode->i_sem, 1);
spin_lock_init(&inode->i_data.i_shared_lock);
+ INIT_LIST_HEAD(&inode->i_data.i_mmap);
+ INIT_LIST_HEAD(&inode->i_data.i_mmap_shared);
}
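The conversion replaces the NULL sentinel with a self-linked list head: an address_space with no mappings now has an empty list rather than a NULL pointer, so every place that used to assign NULL calls INIT_LIST_HEAD() instead. A minimal illustration of the idiom (the helper name is hypothetical; only the standard <linux/list.h> helpers are assumed):

	#include <linux/fs.h>
	#include <linux/list.h>

	/* hypothetical helper, shown only to illustrate the new idiom */
	static void example_reset_mmap_lists(struct address_space *as)
	{
		/* an empty list head links back to itself ... */
		INIT_LIST_HEAD(&as->i_mmap);
		INIT_LIST_HEAD(&as->i_mmap_shared);
		/* ... and is tested with list_empty(), not a NULL compare */
	}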
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
(inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
struct address_space *mapping = inode->i_mapping;
- if (mapping->i_mmap_shared != NULL) {
+ if (!list_empty(&mapping->i_mmap_shared)) {
error = -EAGAIN;
goto out_putf;
}
(inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
struct address_space *mapping = inode->i_mapping;
- if (mapping->i_mmap_shared != NULL) {
+ if (!list_empty(&mapping->i_mmap_shared)) {
error = -EAGAIN;
goto out_putf;
}
unsigned long nrpages; /* number of total pages */
struct address_space_operations *a_ops; /* methods */
struct inode *host; /* owner: inode, block_device */
- struct vm_area_struct *i_mmap; /* list of private mappings */
- struct vm_area_struct *i_mmap_shared; /* list of shared mappings */
+ list_t i_mmap; /* list of private mappings */
+ list_t i_mmap_shared; /* list of shared mappings */
spinlock_t i_shared_lock; /* and spinlock protecting it */
int gfp_mask; /* how to allocate the pages */
};
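With i_mmap and i_mmap_shared turned into list heads, the mappings hanging off an address_space are walked with the generic list iterators through the vma's new `shared` member (introduced below) instead of chasing vm_next_share. A sketch of the traversal, assuming only the standard <linux/list.h> macros; the helper name is hypothetical:

	#include <linux/mm.h>
	#include <linux/list.h>

	static void example_walk_shared(struct address_space *mapping)
	{
		list_t *curr;
		struct vm_area_struct *vma;

		/* i_shared_lock protects both i_mmap and i_mmap_shared */
		spin_lock(&mapping->i_shared_lock);
		list_for_each(curr, &mapping->i_mmap_shared) {
			vma = list_entry(curr, struct vm_area_struct, shared);
			/* ... operate on vma ... */
		}
		spin_unlock(&mapping->i_shared_lock);
	}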
* one of the address_space->i_mmap{,shared} lists,
* for shm areas, the list of attaches, otherwise unused.
*/
- struct vm_area_struct *vm_next_share;
- struct vm_area_struct **vm_pprev_share;
+ list_t shared;
/* Function pointers to deal with this struct. */
struct vm_operations_struct * vm_ops;
/* insert tmp into the share list, just after mpnt */
spin_lock(&inode->i_mapping->i_shared_lock);
- if((tmp->vm_next_share = mpnt->vm_next_share) != NULL)
- mpnt->vm_next_share->vm_pprev_share =
- &tmp->vm_next_share;
- mpnt->vm_next_share = tmp;
- tmp->vm_pprev_share = &mpnt->vm_next_share;
+ list_add_tail(&tmp->shared, &mpnt->shared);
spin_unlock(&inode->i_mapping->i_shared_lock);
}
* virtual addresses, take care about potential aliasing
* before reading the page on the kernel side.
*/
- if (mapping->i_mmap_shared != NULL)
+ if (!list_empty(&mapping->i_mmap_shared))
flush_dcache_page(page);
/*
return -1;
}
-static void vmtruncate_list(struct vm_area_struct *mpnt, unsigned long pgoff)
+static void vmtruncate_list(list_t *head, unsigned long pgoff)
{
- do {
- unsigned long start = mpnt->vm_start;
- unsigned long end = mpnt->vm_end;
- unsigned long len = end - start;
- unsigned long diff;
+ unsigned long start, end, len, diff;
+ struct vm_area_struct *vma;
+ list_t *curr;
+
+ list_for_each(curr, head) {
+ vma = list_entry(curr, struct vm_area_struct, shared);
+ start = vma->vm_start;
+ end = vma->vm_end;
+ len = end - start;
/* mapping wholly truncated? */
- if (mpnt->vm_pgoff >= pgoff) {
- zap_page_range(mpnt, start, len);
+ if (vma->vm_pgoff >= pgoff) {
+ zap_page_range(vma, start, len);
continue;
}
/* mapping wholly unaffected? */
len = len >> PAGE_SHIFT;
- diff = pgoff - mpnt->vm_pgoff;
+ diff = pgoff - vma->vm_pgoff;
if (diff >= len)
continue;
/* Ok, partially affected.. */
start += diff << PAGE_SHIFT;
len = (len - diff) << PAGE_SHIFT;
- zap_page_range(mpnt, start, len);
- } while ((mpnt = mpnt->vm_next_share) != NULL);
+ zap_page_range(vma, start, len);
+ }
}
/*
goto do_expand;
inode->i_size = offset;
spin_lock(&mapping->i_shared_lock);
- if (!mapping->i_mmap && !mapping->i_mmap_shared)
+ if (list_empty(&mapping->i_mmap) && list_empty(&mapping->i_mmap_shared))
goto out_unlock;
pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (mapping->i_mmap != NULL)
- vmtruncate_list(mapping->i_mmap, pgoff);
- if (mapping->i_mmap_shared != NULL)
- vmtruncate_list(mapping->i_mmap_shared, pgoff);
+ if (!list_empty(&mapping->i_mmap))
+ vmtruncate_list(&mapping->i_mmap, pgoff);
+ if (!list_empty(&mapping->i_mmap_shared))
+ vmtruncate_list(&mapping->i_mmap_shared, pgoff);
out_unlock:
spin_unlock(&mapping->i_shared_lock);
struct inode *inode = file->f_dentry->d_inode;
if (vma->vm_flags & VM_DENYWRITE)
atomic_inc(&inode->i_writecount);
- if(vma->vm_next_share)
- vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
- *vma->vm_pprev_share = vma->vm_next_share;
+ list_del_init(&vma->shared);
}
}
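The open-coded pointer surgery on the unlink side collapses to list_del_init(). Using the _init variant rather than plain list_del() leaves vma->shared self-linked, so the entry can still be tested with list_empty() or unlinked again without touching stale pointers. A hedged sketch of the distinction (hypothetical helper, standard <linux/list.h> semantics assumed):

	static void example_unlink(struct vm_area_struct *vma)
	{
		list_del_init(&vma->shared);	/* entry now points at itself */
		/* list_empty(&vma->shared) is true, and a second
		 * list_del_init() here would be harmless; a plain list_del()
		 * would have left next/prev aimed at the old neighbours,
		 * making any repeat removal or emptiness test unsafe.
		 */
	}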
if (file) {
struct inode * inode = file->f_dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
- struct vm_area_struct **head;
if (vma->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
- head = &mapping->i_mmap;
if (vma->vm_flags & VM_SHARED)
- head = &mapping->i_mmap_shared;
-
- /* insert vma into inode's share list */
- if((vma->vm_next_share = *head) != NULL)
- (*head)->vm_pprev_share = &vma->vm_next_share;
- *head = vma;
- vma->vm_pprev_share = head;
+ list_add_tail(&vma->shared, &mapping->i_mmap_shared);
+ else
+ list_add_tail(&vma->shared, &mapping->i_mmap);
}
}
if ((desc->error = shmem_getpage(inode, index, &page)))
break;
- if (mapping->i_mmap_shared != NULL)
+ if (!list_empty(&mapping->i_mmap_shared))
flush_dcache_page(page);
/*