[PATCH] slab memory shrinking balancing fix
author Andrew Morton <akpm@osdl.org>
Sat, 31 Jul 2004 07:47:41 +0000 (00:47 -0700)
committer Linus Torvalds <torvalds@ppc970.osdl.org>
Sat, 31 Jul 2004 07:47:41 +0000 (00:47 -0700)
The logic in shrink_slab tries to balance the proportion of slab which it
scans against the proportion of pagecache which the caller scanned.  Problem
is that with a large number of highmem LRU pages and a small number of lowmem
LRU pages, the amount of pagecache scanning appears to be very small, so we
don't push slab hard enough.
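
For illustration, a standalone sketch of the dilution (the zone sizes and
shrinker values below are made-up numbers; the arithmetic mirrors the delta
calculation in shrink_slab() as changed by the diff below):

  #include <stdio.h>

  int main(void)
  {
          /* Hypothetical box: 900,000 highmem LRU pages, 100,000 lowmem
           * LRU pages.  A GFP_KERNEL pass can scan only the lowmem zones. */
          unsigned long long scanned    = 100000;  /* pages the caller just scanned */
          unsigned long long all_lru    = 1000000; /* old divisor: LRU pages in every zone */
          unsigned long long lowmem_lru = 100000;  /* new divisor: LRU pages in eligible zones */
          unsigned long long seeks      = 2;       /* shrinker->seeks */
          unsigned long long objects    = 50000;   /* what (*shrinker->shrinker)(0, gfp_mask) reports */

          /* delta = (4 * scanned / seeks) * objects / (lru + 1), as in shrink_slab() */
          unsigned long long old_delta = 4 * scanned / seeks * objects / (all_lru + 1);
          unsigned long long new_delta = 4 * scanned / seeks * objects / (lowmem_lru + 1);

          printf("old delta: %llu objects\n", old_delta);  /* ~10,000  */
          printf("new delta: %llu objects\n", new_delta);  /* ~100,000 */
          return 0;
  }

With the old all-zone divisor the shrinkers are asked for roughly ten times
fewer objects than with the new eligible-zone divisor.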

The patch changes things so that for, say, a GFP_KERNEL allocation attempt we
only consider ZONE_NORMAL and ZONE_DMA when calculating "what proportion of
the LRU did the caller just scan".

This will have the effect of shrinking slab harder in response to GFP_KERNEL
allocations than for GFP_HIGHMEM allocations.
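
A minimal userspace sketch of the accumulation the patch adds to
try_to_free_pages(), with an invented zone array standing in for the
caller's zonelist (the real kernel walks the zones[] list passed in for
the allocation, which for GFP_KERNEL contains only ZONE_DMA and
ZONE_NORMAL):

  #include <stdio.h>

  /* Stand-in for struct zone; only the LRU counters matter here. */
  struct zone {
          const char *name;
          unsigned long nr_active;
          unsigned long nr_inactive;
  };

  int main(void)
  {
          /* Invented sizes; the highmem zone never enters the sum
           * because it is not in the GFP_KERNEL zonelist. */
          struct zone dma    = { "DMA",     1000,  3000 };
          struct zone normal = { "Normal", 40000, 56000 };
          struct zone *gfp_kernel_zones[] = { &dma, &normal, NULL };

          unsigned long lru_pages = 0;
          for (int i = 0; gfp_kernel_zones[i] != NULL; i++)
                  lru_pages += gfp_kernel_zones[i]->nr_active +
                               gfp_kernel_zones[i]->nr_inactive;

          printf("lru_pages passed to shrink_slab(): %lu\n", lru_pages); /* 100000 */
          return 0;
  }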

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/linux/mm.h
mm/page_alloc.c
mm/vmscan.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0205d4cd35eb8756d665194612bbeadf0cadbb38..5c584ccededa50f83d0228ae2799e3941db7e57e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -706,8 +706,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
 
 extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
 
-extern unsigned int nr_used_zone_pages(void);
-
 extern struct page * vmalloc_to_page(void *addr);
 extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
                int write);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bfffe0659d6658ad72ade919e50265036c8325c8..ea75a79010fb3bb2f274be78ea42531070169c66 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -825,17 +825,6 @@ unsigned int nr_free_pages(void)
 
 EXPORT_SYMBOL(nr_free_pages);
 
-unsigned int nr_used_zone_pages(void)
-{
-       unsigned int pages = 0;
-       struct zone *zone;
-
-       for_each_zone(zone)
-               pages += zone->nr_active + zone->nr_inactive;
-
-       return pages;
-}
-
 #ifdef CONFIG_NUMA
 unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f142f2f188f6dbb5f05e797451917314d09e0798..9aedd8e48c88120678ede3455912f45c979c2ba7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -169,22 +169,25 @@ EXPORT_SYMBOL(remove_shrinker);
  * slab to avoid swapping.
  *
  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
+ *
+ * `lru_pages' represents the number of on-LRU pages in all the zones which
+ * are eligible for the caller's allocation attempt.  It is used for balancing
+ * slab reclaim versus page reclaim.
  */
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask)
+static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+                       unsigned long lru_pages)
 {
        struct shrinker *shrinker;
-       long pages;
 
        if (down_trylock(&shrinker_sem))
                return 0;
 
-       pages = nr_used_zone_pages();
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
 
                delta = (4 * scanned) / shrinker->seeks;
                delta *= (*shrinker->shrinker)(0, gfp_mask);
-               do_div(delta, pages + 1);
+               do_div(delta, lru_pages + 1);
                shrinker->nr += delta;
                if (shrinker->nr < 0)
                        shrinker->nr = LONG_MAX;        /* It wrapped! */
@@ -896,6 +899,7 @@ int try_to_free_pages(struct zone **zones,
        int total_scanned = 0, total_reclaimed = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc;
+       unsigned long lru_pages = 0;
        int i;
 
        sc.gfp_mask = gfp_mask;
@@ -903,8 +907,12 @@ int try_to_free_pages(struct zone **zones,
 
        inc_page_state(allocstall);
 
-       for (i = 0; zones[i] != 0; i++)
-               zones[i]->temp_priority = DEF_PRIORITY;
+       for (i = 0; zones[i] != NULL; i++) {
+               struct zone *zone = zones[i];
+
+               zone->temp_priority = DEF_PRIORITY;
+               lru_pages += zone->nr_active + zone->nr_inactive;
+       }
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc.nr_mapped = read_page_state(nr_mapped);
@@ -912,7 +920,7 @@ int try_to_free_pages(struct zone **zones,
                sc.nr_reclaimed = 0;
                sc.priority = priority;
                shrink_caches(zones, &sc);
-               shrink_slab(sc.nr_scanned, gfp_mask);
+               shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
                if (reclaim_state) {
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        reclaim_state->reclaimed_slab = 0;
@@ -997,7 +1005,7 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int all_zones_ok = 1;
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
-
+               unsigned long lru_pages = 0;
 
                if (nr_pages == 0) {
                        /*
@@ -1021,6 +1029,12 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
                        end_zone = pgdat->nr_zones - 1;
                }
 scan:
+               for (i = 0; i <= end_zone; i++) {
+                       struct zone *zone = pgdat->node_zones + i;
+
+                       lru_pages += zone->nr_active + zone->nr_inactive;
+               }
+
                /*
                 * Now scan the zone in the dma->highmem direction, stopping
                 * at the last zone which needs scanning.
@@ -1048,7 +1062,7 @@ scan:
                        sc.priority = priority;
                        shrink_zone(zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
-                       shrink_slab(sc.nr_scanned, GFP_KERNEL);
+                       shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_reclaimed += sc.nr_reclaimed;
                        if (zone->all_unreclaimable)