git.hungrycats.org Git - linux/commitdiff
[PATCH] Add some low-latency scheduling points
author Andrew Morton <akpm@digeo.com>
Tue, 26 Nov 2002 01:57:08 +0000 (17:57 -0800)
committer Linus Torvalds <torvalds@home.transmeta.com>
Tue, 26 Nov 2002 01:57:08 +0000 (17:57 -0800)
This is the first in a little batch of patches which address long-held
locks in the VFS/MM layer which are affecting our worst-case scheduling
latency, and are making CONFIG_PREEMPT not very useful.

We end up with a worst-case of 500 microseconds at 500MHz, which is
very good.  Unless you do an exit with lots of mmapped memory.
unmap_page_range() needs work.

Some of these patches also add rescheduling points for non-preemptible
kernels - where I felt that the code path could be long enough to be
perceptible.

Three places in the generic pagecache functions need manual
rescheduling points even for non-preemptible kernels:

- generic_file_read()  (Can hold the CPU for seconds)

- generic_file_write() (ditto)

- filemap_fdatawait().  This won't hold the CPU for so long, but it
  can walk many thousands of pages under the lock.  It needs a lock
  drop and scheduling point for both preemptible and non-preemptible
  kernels.  (This one's a bit ugly...)

mm/filemap.c

index 85fc6760fb51b92fea990fe6431764dd4517b89a..46d87c50b921f71f435668f4cb5a78d0001f1047 100644 (file)
@@ -174,9 +174,11 @@ int filemap_fdatawrite(struct address_space *mapping)
 int filemap_fdatawait(struct address_space * mapping)
 {
        int ret = 0;
+       int progress;
 
+restart:
+       progress = 0;
        write_lock(&mapping->page_lock);
-
         while (!list_empty(&mapping->locked_pages)) {
                struct page *page;
 
@@ -187,9 +189,18 @@ int filemap_fdatawait(struct address_space * mapping)
                else
                        list_add(&page->list, &mapping->clean_pages);
 
-               if (!PageWriteback(page))
+               if (!PageWriteback(page)) {
+                       if (++progress > 32) {
+                               if (need_resched()) {
+                                       write_unlock(&mapping->page_lock);
+                                       __cond_resched();
+                                       goto restart;
+                               }
+                       }
                        continue;
+               }
 
+               progress = 0;
                page_cache_get(page);
                write_unlock(&mapping->page_lock);
 
@@ -559,6 +570,7 @@ void do_generic_mapping_read(struct address_space *mapping,
                                break;
                }
 
+               cond_resched();
                page_cache_readahead(mapping, ra, filp, index);
 
                nr = nr - offset;
@@ -1770,6 +1782,7 @@ generic_file_write_nolock(struct file *file, const struct iovec *iov,
                if (status < 0)
                        break;
                balance_dirty_pages_ratelimited(mapping);
+               cond_resched();
        } while (count);
        *ppos = pos;