return err;
}
-static int loop_clr_fd(struct loop_device *lo, kdev_t dev)
+static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
{
struct file *filp = lo->lo_backing_file;
int gfp = lo->old_gfp_mask;
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_name, 0, LO_NAME_SIZE);
loop_sizes[lo->lo_number] = 0;
- invalidate_buffers(dev);
+ invalidate_bdev(bdev, 0);
filp->f_dentry->d_inode->i_mapping->gfp_mask = gfp;
lo->lo_state = Lo_unbound;
fput(filp);
err = loop_set_fd(lo, file, inode->i_rdev, arg);
break;
case LOOP_CLR_FD:
- err = loop_clr_fd(lo, inode->i_rdev);
+ err = loop_clr_fd(lo, inode->i_bdev);
break;
case LOOP_SET_STATUS:
err = loop_set_status(lo, (struct loop_info *) arg);
static void idedisk_release (struct inode *inode, struct file *filp, ide_drive_t *drive)
{
if (drive->removable && !drive->usage) {
- invalidate_buffers(inode->i_rdev);
+ invalidate_bdev(inode->i_bdev, 0);
if (drive->doorlocking && ide_wait_cmd(drive, WIN_DOORUNLOCK, 0, 0, 0, NULL))
drive->doorlocking = 0;
}
if (!drive->usage) {
idefloppy_floppy_t *floppy = drive->driver_data;
- invalidate_buffers (inode->i_rdev);
+ invalidate_bdev (inode->i_bdev, 0);
/* IOMEGA Clik! drives do not support lock/unlock commands */
if (!test_bit(IDEFLOPPY_CLIK_DRIVE, &floppy->flags)) {
return (loff_t) blocks << BLOCK_SIZE_BITS;
}
+/* Kill _all_ buffers, dirty or not.. */
+static void kill_bdev(struct block_device *bdev)
+{
+	/* Throw away every buffer for this device, dirty ones included
+	 * (destroy_dirty_buffers == 1), without writing them back. */
+	invalidate_bdev(bdev, 1);
+	/* Then drop the page cache backing the block device itself. */
+	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+}
+
+/*
+ * kdev_t front end for kill_bdev(): look up the block_device for @dev
+ * and, if one exists, destroy all of its buffers and cached pages.
+ * Silently does nothing when no block_device can be found.
+ */
+static inline void kill_buffers(kdev_t dev)
+{
+	/* bdget() wants a device number, not a kdev_t — convert first,
+	 * as the old truncate_buffers() did. */
+	struct block_device *bdev = bdget(kdev_t_to_nr(dev));
+	if (bdev) {
+		kill_bdev(bdev);
+		bdput(bdev);
+	}
+}
+
+/*
+ * Set the soft blocksize for @dev.  Size must be a power of two between
+ * 512 and PAGE_SIZE.  When the size actually changes, all existing
+ * buffers and cached pages for the device are flushed and destroyed.
+ */
+void set_blocksize(kdev_t dev, int size)
+{
+	extern int *blksize_size[];
+
+	/* No blocksize array registered for this major: nothing to do. */
+	if (!blksize_size[MAJOR(dev)])
+		return;
+
+	/* Size must be a power of two, and between 512 and PAGE_SIZE */
+	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
+		panic("Invalid blocksize passed to set_blocksize");
+
+	/* First-time default to BLOCK_SIZE needs no cache shootdown. */
+	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
+		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
+		return;
+	}
+	/* Unchanged size: nothing to do. */
+	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
+		return;
+
+	/* Write back what we can, then drop everything at the old size. */
+	sync_buffers(dev, 2);
+	blksize_size[MAJOR(dev)][MINOR(dev)] = size;
+	kill_buffers(dev);
+}
+
static inline int blkdev_get_block(struct inode * inode, long iblock, struct buffer_head * bh_result)
{
int err;
*/
static loff_t block_llseek(struct file *file, loff_t offset, int origin)
{
- long long retval;
- kdev_t dev;
+ /* ewww */
+ loff_t size = file->f_dentry->d_inode->i_bdev->bd_inode->i_size;
+ loff_t retval;
switch (origin) {
case 2:
- dev = file->f_dentry->d_inode->i_rdev;
- if (blk_size[MAJOR(dev)])
- offset += (loff_t) blk_size[MAJOR(dev)][MINOR(dev)] << BLOCK_SIZE_BITS;
- /* else? return -EINVAL? */
+ offset += size;
break;
case 1:
offset += file->f_pos;
}
retval = -EINVAL;
- if (offset >= 0) {
+ if (offset >= 0 && offset <= size) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_reada = 0;
new_bdev->bd_dev = dev;
new_bdev->bd_op = NULL;
new_bdev->bd_inode = inode;
- inode->i_size = blkdev_size(dev);
inode->i_rdev = to_kdev_t(dev);
inode->i_bdev = new_bdev;
inode->i_data.a_ops = &def_blk_aops;
ret = bdev->bd_op->open(bdev->bd_inode, &fake_file);
if (!ret) {
bdev->bd_openers++;
+ bdev->bd_inode->i_size = blkdev_size(rdev);
} else if (!bdev->bd_openers)
bdev->bd_op = NULL;
}
lock_kernel();
if (!bdev->bd_op)
bdev->bd_op = get_blkfops(MAJOR(inode->i_rdev));
+
if (bdev->bd_op) {
ret = 0;
if (bdev->bd_op->open)
ret = bdev->bd_op->open(inode,filp);
- if (!ret)
+ if (!ret) {
bdev->bd_openers++;
- else if (!bdev->bd_openers)
+ bdev->bd_inode->i_size = blkdev_size(inode->i_rdev);
+ } else if (!bdev->bd_openers)
bdev->bd_op = NULL;
}
+
unlock_kernel();
up(&bdev->bd_sem);
if (ret)
down(&bdev->bd_sem);
lock_kernel();
- if (kind == BDEV_FILE) {
+ if (kind == BDEV_FILE)
__block_fsync(bd_inode);
- } else if (kind == BDEV_FS)
+ else if (kind == BDEV_FS)
fsync_no_super(rdev);
- if (!--bdev->bd_openers) {
- truncate_inode_pages(bd_inode->i_mapping, 0);
- invalidate_buffers(rdev);
- }
+ if (!--bdev->bd_openers)
+ kill_bdev(bdev);
if (bdev->bd_op->release)
ret = bdev->bd_op->release(bd_inode, NULL);
if (!bdev->bd_openers)
static spinlock_t unused_list_lock = SPIN_LOCK_UNLOCKED;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
-static void truncate_buffers(kdev_t dev);
static int grow_buffers(kdev_t dev, unsigned long block, int size);
static void __refile_buffer(struct buffer_head *);
we think the disk contains more recent information than the buffercache.
The update == 1 pass marks the buffers we need to update, the update == 2
pass does the actual I/O. */
-void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers)
+void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
{
int i, nlist, slept;
struct buffer_head * bh, * bh_next;
+ kdev_t dev = to_kdev_t(bdev->bd_dev); /* will become bdev */
retry:
slept = 0;
goto retry;
/* Get rid of the page cache */
- truncate_buffers(dev);
+ invalidate_inode_pages(bdev->bd_inode);
}
-void set_blocksize(kdev_t dev, int size)
+/*
+ * Compatibility wrapper for old kdev_t callers: resolve @dev to its
+ * block_device and forward to invalidate_bdev().  No-op when the
+ * device has no block_device instance.
+ */
+void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers)
{
-	extern int *blksize_size[];
-
-	if (!blksize_size[MAJOR(dev)])
-		return;
-
-	/* Size must be a power of two, and between 512 and PAGE_SIZE */
-	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
-		panic("Invalid blocksize passed to set_blocksize");
-
-	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
-		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
-		return;
+	/* bdget() takes a device number, not a kdev_t. */
+	struct block_device *bdev = bdget(kdev_t_to_nr(dev));
+	if (bdev) {
+		invalidate_bdev(bdev, destroy_dirty_buffers);
+		bdput(bdev);
	}
-	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
-		return;
-
-	sync_buffers(dev, 2);
-	blksize_size[MAJOR(dev)][MINOR(dev)] = size;
-	invalidate_buffers(dev);
}
static void free_more_memory(void)
return 1;
}
-static void truncate_buffers(kdev_t dev)
-{
- struct block_device *bdev = bdget(kdev_t_to_nr(dev));
- truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
- atomic_dec(&bdev->bd_count);
-}
-
static int sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
{
struct buffer_head * p = bh;
extern void invalidate_inode_buffers(struct inode *);
#define invalidate_buffers(dev) __invalidate_buffers((dev), 0)
#define destroy_buffers(dev) __invalidate_buffers((dev), 1)
+extern void invalidate_bdev(struct block_device *, int);
extern void __invalidate_buffers(kdev_t dev, int);
extern void sync_inodes(kdev_t);
extern void sync_unlocked_inodes(void);
EXPORT_SYMBOL(files_lock);
EXPORT_SYMBOL(check_disk_change);
EXPORT_SYMBOL(__invalidate_buffers);
+EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(invalidate_inodes);
EXPORT_SYMBOL(invalidate_device);
EXPORT_SYMBOL(invalidate_inode_pages);
page = list_entry(curr, struct page, list);
curr = curr->next;
- /* We cannot invalidate something in use.. */
- if (page_count(page) != 1)
- continue;
-
- /* ..or dirty.. */
+	/* We cannot invalidate something that is dirty.. */
if (PageDirty(page))
continue;
if (TryLockPage(page))
continue;
+ if (page->buffers && !try_to_free_buffers(page, 0))
+ goto unlock;
+
+ if (page_count(page) != 1)
+ goto unlock;
+
__lru_cache_del(page);
__remove_inode_page(page);
UnlockPage(page);
page_cache_release(page);
+ continue;
+unlock:
+ UnlockPage(page);
+ continue;
}
spin_unlock(&pagemap_lru_lock);