[PATCH] reduce lock contention in do_pagecache_readahead
author    Andrew Morton <akpm@zip.com.au>
          Sun, 19 May 2002 09:20:34 +0000 (02:20 -0700)
committer Arnaldo Carvalho de Melo <acme@conectiva.com.br>
          Sun, 19 May 2002 09:20:34 +0000 (02:20 -0700)
Anton Blanchard has a workload (the SDET benchmark) which is showing some
moderate lock contention in do_pagecache_readahead().

Seems that SDET has many threads performing seeky reads against a
cached file.  The average number of pagecache probes in a single
do_pagecache_readahead() is six, which seems reasonable.

The patch (from Anton) flips the locking around to optimise for the
fast case (the page is already present).  So the kernel takes the lock
less often, and does more work once it has been acquired.
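The pattern is worth spelling out: when the common case is a read-only
probe that almost always hits, it is cheaper to take the reader lock
once around the whole loop and drop it only across the rare slow path
(the allocation, which may block), rather than locking and unlocking
on every probe.  Below is a minimal user-space sketch of the same
idea, using a POSIX rwlock and a hypothetical fixed-slot cache in
place of mapping->page_lock and the radix tree; preallocate(),
cache_lookup() and struct cache are illustrative stand-ins, not
kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SLOTS 64

/* Hypothetical stand-in for the page cache. */
struct cache {
	pthread_rwlock_t lock;		/* plays the role of mapping->page_lock */
	void *slot[CACHE_SLOTS];	/* plays the role of the radix tree */
};

/* Read-side probe, like radix_tree_lookup(): caller holds the read lock. */
static void *cache_lookup(struct cache *c, unsigned long idx)
{
	return idx < CACHE_SLOTS ? c->slot[idx] : NULL;
}

/*
 * Preallocate backing for [offset, offset+nr) the way the patched loop
 * does: take the reader lock once, and release it only around the
 * allocation (which, like page_cache_alloc(), may block).
 */
static int preallocate(struct cache *c, unsigned long offset,
		       unsigned long nr, void **pool)
{
	unsigned long i;
	int count = 0;

	pthread_rwlock_rdlock(&c->lock);
	for (i = 0; i < nr; i++) {
		if (cache_lookup(c, offset + i))	/* fast path: hit, stay locked */
			continue;

		pthread_rwlock_unlock(&c->lock);	/* slow path: unlock to allocate */
		void *page = malloc(4096);
		pthread_rwlock_rdlock(&c->lock);
		if (!page)
			break;
		pool[count++] = page;
	}
	pthread_rwlock_unlock(&c->lock);
	return count;
}

int main(void)
{
	struct cache c = { .lock = PTHREAD_RWLOCK_INITIALIZER };
	void *pool[CACHE_SLOTS];

	c.slot[3] = &c;		/* pretend index 3 is already cached */
	printf("allocated %d of 8 pages (index 3 was a hit)\n",
	       preallocate(&c, 0, 8, pool));
	return 0;
}

In the kernel the unlock/relock around page_cache_alloc() is not just
an optimisation: mapping->page_lock is a spinning rwlock, so it cannot
be held across an allocation that may sleep.  The actual patch follows: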

diff --git a/mm/readahead.c b/mm/readahead.c
index 86d54f5b38e5bcea551726130e2b5b5c173aa0fd..b59f8f4c57bcee02059ea4a7c33a67a79f8cf2d8 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -117,25 +117,27 @@ void do_page_cache_readahead(struct file *file,
        /*
         * Preallocate as many pages as we will need.
         */
+       read_lock(&mapping->page_lock);
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                unsigned long page_offset = offset + page_idx;
                
                if (page_offset > end_index)
                        break;
 
-               read_lock(&mapping->page_lock);
                page = radix_tree_lookup(&mapping->page_tree, page_offset);
-               read_unlock(&mapping->page_lock);
                if (page)
                        continue;
 
+               read_unlock(&mapping->page_lock);
                page = page_cache_alloc(mapping);
+               read_lock(&mapping->page_lock);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->list, &page_pool);
                nr_to_really_read++;
        }
+       read_unlock(&mapping->page_lock);
 
        /*
         * Now start the IO.  We ignore I/O errors - if the page is not