git.hungrycats.org Git - linux/commitdiff
[PATCH] ENOSPC correctness
author Andrew Morton <akpm@zip.com.au>
Mon, 18 Feb 2002 01:01:56 +0000 (17:01 -0800)
committer Linus Torvalds <torvalds@home.transmeta.com>
Mon, 18 Feb 2002 01:01:56 +0000 (17:01 -0800)
A forward-port.  This is the code which prevents ENOSPC
errors from exposing stale data within filesystems.

- in generic_file_write(), if prepare_write() fails, truncate
  the file to drop any part-added blocks.

- in __block_write_full_page(), if we hit an error, push
  whatever buffers we _have_ mapped into the file out to disk.

- in __block_prepare_write(), if we hit an error, zero out
  any blocks which we did manage to map into the file.  This
  is because the caller won't be doing any writing to those
  blocks due to the error.

fs/buffer.c
mm/filemap.c

index edd2ec73eeb71e88ec05d825e3eb515eec8fd287..612ea5e807f70a0c8a7b2879ec711ab7ddcca620 100644 (file)
@@ -1441,6 +1441,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, get_b
        int err, i;
        unsigned long block;
        struct buffer_head *bh, *head;
+       int need_unlock;
 
        if (!PageLocked(page))
                BUG();
@@ -1496,8 +1497,34 @@ static int __block_write_full_page(struct inode *inode, struct page *page, get_b
        return 0;
 
 out:
+       /*
+        * ENOSPC, or some other error.  We may already have added some
+        * blocks to the file, so we need to write these out to avoid
+        * exposing stale data.
+        */
        ClearPageUptodate(page);
-       UnlockPage(page);
+       bh = head;
+       need_unlock = 1;
+       /* Recovery: lock and submit the mapped buffers */
+       do {
+               if (buffer_mapped(bh)) {
+                       lock_buffer(bh);
+                       set_buffer_async_io(bh);
+                       need_unlock = 0;
+               }
+               bh = bh->b_this_page;
+       } while (bh != head);
+       do {
+               struct buffer_head *next = bh->b_this_page;
+               if (buffer_mapped(bh)) {
+                       set_bit(BH_Uptodate, &bh->b_state);
+                       clear_bit(BH_Dirty, &bh->b_state);
+                       submit_bh(WRITE, bh);
+               }
+               bh = next;
+       } while (bh != head);
+       if (need_unlock)
+               UnlockPage(page);
        return err;
 }
 
@@ -1528,6 +1555,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                        continue;
                if (block_start >= to)
                        break;
+               clear_bit(BH_New, &bh->b_state);
                if (!buffer_mapped(bh)) {
                        err = get_block(inode, block, bh, 1);
                        if (err)
@@ -1562,12 +1590,35 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
         */
        while(wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
-               err = -EIO;
                if (!buffer_uptodate(*wait_bh))
-                       goto out;
+                       return -EIO;
        }
        return 0;
 out:
+       /*
+        * Zero out any newly allocated blocks to avoid exposing stale
+        * data.  If BH_New is set, we know that the block was newly
+        * allocated in the above loop.
+        */
+       bh = head;
+       block_start = 0;
+       do {
+               block_end = block_start+blocksize;
+               if (block_end <= from)
+                       goto next_bh;
+               if (block_start >= to)
+                       break;
+               if (buffer_new(bh)) {
+                       if (buffer_uptodate(bh))
+                               printk(KERN_ERR "%s: zeroing uptodate buffer!\n", __FUNCTION__);
+                       memset(kaddr+block_start, 0, bh->b_size);
+                       set_bit(BH_Uptodate, &bh->b_state);
+                       mark_buffer_dirty(bh);
+               }
+next_bh:
+               block_start = block_end;
+               bh = bh->b_this_page;
+       } while (bh != head);
        return err;
 }
 
index a421555d6304b0a1960d62dcf732d75c07f8a152..d9301785869722af3b00f9d8d33b6a91d66bd466 100644 (file)
@@ -2995,7 +2995,7 @@ generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
                kaddr = kmap(page);
                status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
                if (status)
-                       goto unlock;
+                       goto sync_failure;
                page_fault = __copy_from_user(kaddr+offset, buf, bytes);
                flush_dcache_page(page);
                status = mapping->a_ops->commit_write(file, page, offset, offset+bytes);
@@ -3020,6 +3020,7 @@ unlock:
                if (status < 0)
                        break;
        } while (count);
+done:
        *ppos = pos;
 
        if (cached_page)
@@ -3042,6 +3043,18 @@ fail_write:
        status = -EFAULT;
        goto unlock;
 
+sync_failure:
+       /*
+        * If blocksize < pagesize, prepare_write() may have instantiated a
+        * few blocks outside i_size.  Trim these off again.
+        */
+       kunmap(page);
+       UnlockPage(page);
+       page_cache_release(page);
+       if (pos + bytes > inode->i_size)
+               vmtruncate(inode, inode->i_size);
+       goto done;
+
 o_direct:
        written = generic_file_direct_IO(WRITE, file, (char *) buf, count, pos);
        if (written > 0) {