mm: replace various uses of num_physpages by totalram_pages
author Jan Beulich <JBeulich@novell.com>
Tue, 22 Sep 2009 00:03:05 +0000 (17:03 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Thu, 1 Apr 2010 22:55:50 +0000 (15:55 -0700)
commit 4481374ce88ba8f460c8b89f2572027bd27057d0 upstream.

Sizing of memory allocations shouldn't depend on the number of physical
pages found in a system, as that generally includes (perhaps a huge amount
of) non-RAM pages.  The amount of memory actually usable as RAM should be
used as the basis here instead.

Some of the calculations (i.e.  those not intending to use high memory)
should likely even use (totalram_pages - totalhigh_pages).
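
As a minimal sketch of the intended pattern (not part of this patch; the
helper name and the one-entry-per-16-KiB ratio are made up for illustration),
a lowmem-only table sized from usable RAM rather than from num_physpages
could look like this:

	#include <linux/mm.h>		/* totalram_pages */
	#include <linux/highmem.h>	/* totalhigh_pages, 0 without CONFIG_HIGHMEM */

	/* Hypothetical helper, not taken from any of the files below. */
	static unsigned long example_table_entries(void)
	{
		/* Pages the page allocator actually manages, not raw physical pages. */
		unsigned long usable = totalram_pages;

		/* A table that must live in lowmem should not count highmem pages. */
		unsigned long lowmem = usable - totalhigh_pages;

		/* One entry per 16 KiB of usable lowmem (purely illustrative ratio). */
		return (lowmem << PAGE_SHIFT) / 16384;
	}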

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
21 files changed:
arch/x86/kernel/microcode_core.c
drivers/char/agp/backend.c
drivers/parisc/ccio-dma.c
drivers/parisc/sba_iommu.c
drivers/xen/balloon.c
fs/ntfs/malloc.h
include/linux/mm.h
init/main.c
mm/slab.c
mm/swap.c
mm/vmalloc.c
net/core/sock.c
net/dccp/proto.c
net/decnet/dn_route.c
net/ipv4/route.c
net/ipv4/tcp.c
net/netfilter/nf_conntrack_core.c
net/netfilter/x_tables.c
net/netfilter/xt_hashlimit.c
net/netlink/af_netlink.c
net/sctp/protocol.c

index 9371448290ac3c4f3a3489b0d9edbe55a5dd689b..0511035f3b847ffbe9eff9ef5d8634d4c96d5deb 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -210,8 +210,8 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
 {
        ssize_t ret = -EINVAL;
 
-       if ((len >> PAGE_SHIFT) > num_physpages) {
-               pr_err("microcode: too much data (max %ld pages)\n", num_physpages);
+       if ((len >> PAGE_SHIFT) > totalram_pages) {
+               pr_err("microcode: too much data (max %ld pages)\n", totalram_pages);
                return ret;
        }
 
index cfa5a649dfe766d3d814637a435fb7d83fb80dc2..19ce9d6c69f18ea9cec2bb19c8266d22896df928 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -114,9 +114,9 @@ static int agp_find_max(void)
        long memory, index, result;
 
 #if PAGE_SHIFT < 20
-       memory = num_physpages >> (20 - PAGE_SHIFT);
+       memory = totalram_pages >> (20 - PAGE_SHIFT);
 #else
-       memory = num_physpages << (PAGE_SHIFT - 20);
+       memory = totalram_pages << (PAGE_SHIFT - 20);
 #endif
        index = 1;
 
index a45b0c0d574e2d909c2180b6fa752d9100e770f6..a6b4a5a53d40d8f123345a6e3f5400c409a8e3a4 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1266,7 +1266,7 @@ ccio_ioc_init(struct ioc *ioc)
        ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
        */
 
-       iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver));
+       iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver));
 
        /* limit IOVA space size to 1MB-1GB */
 
@@ -1305,7 +1305,7 @@ ccio_ioc_init(struct ioc *ioc)
 
        DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
                        __func__, ioc->ioc_regs,
-                       (unsigned long) num_physpages >> (20 - PAGE_SHIFT),
+                       (unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
                        iova_space_size>>20,
                        iov_order + PAGE_SHIFT);
 
index 123d8fe3427d0a1286f8ebd5d1cf684d13969993..57a6d19eba4c73877283b7779229d5619cc4d0bd 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1390,7 +1390,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
        ** for DMA hints - ergo only 30 bits max.
        */
 
-       iova_space_size = (u32) (num_physpages/global_ioc_cnt);
+       iova_space_size = (u32) (totalram_pages/global_ioc_cnt);
 
        /* limit IOVA space size to 1MB-1GB */
        if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
@@ -1415,7 +1415,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
        DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
                        __func__,
                        ioc->ioc_hpa,
-                       (unsigned long) num_physpages >> (20 - PAGE_SHIFT),
+                       (unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
                        iova_space_size>>20,
                        iov_order + PAGE_SHIFT);
 
index f5bbd9e8341660a6d4f5195d1b42350db0288d5d..1b7123eb5d7b8e70926938b5fc4a9c4b451d5522 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -96,11 +96,7 @@ static struct balloon_stats balloon_stats;
 /* We increase/decrease in batches which fit in a page */
 static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
 
-/* VM /proc information for memory */
-extern unsigned long totalram_pages;
-
 #ifdef CONFIG_HIGHMEM
-extern unsigned long totalhigh_pages;
 #define inc_totalhigh_pages() (totalhigh_pages++)
 #define dec_totalhigh_pages() (totalhigh_pages--)
 #else
index cd0be3f5c3cd34a5d5209adafcf6ad518ff7da0b..a44b14cbceebcd9f8fd7cb9be561c12373c0c271 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -47,7 +47,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
                return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
                /* return (void *)__get_free_page(gfp_mask); */
        }
-       if (likely(size >> PAGE_SHIFT < num_physpages))
+       if (likely((size >> PAGE_SHIFT) < totalram_pages))
                return __vmalloc(size, gfp_mask, PAGE_KERNEL);
        return NULL;
 }
index 9a72cc78e6b817d2b7c24b076eef7bd0061235f2..ef3603991d6f51f5d26b1a62560959b4a2d0bc9d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -25,6 +25,7 @@ extern unsigned long max_mapnr;
 #endif
 
 extern unsigned long num_physpages;
+extern unsigned long totalram_pages;
 extern void * high_memory;
 extern int page_cluster;
 
index 1ec6347ee332df09902eb40309878f21ca34c462..7901959497a88aad1b6d873a28df11d8691fa574 100644
--- a/init/main.c
+++ b/init/main.c
@@ -686,12 +686,12 @@ asmlinkage void __init start_kernel(void)
 #endif
        thread_info_cache_init();
        cred_init();
-       fork_init(num_physpages);
+       fork_init(totalram_pages);
        proc_caches_init();
        buffer_init();
        key_init();
        security_init();
-       vfs_caches_init(num_physpages);
+       vfs_caches_init(totalram_pages);
        radix_tree_init();
        signals_init();
        /* rootfs populating might need page-writeback */
index 7b5d4deacfcd96f79460e3e91d159f22b47dec5f..7dfa481c96bade62ae4ba34299dcd4fb8d79cdb3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1384,7 +1384,7 @@ void __init kmem_cache_init(void)
         * Fragmentation resistance on low memory - only use bigger
         * page orders on machines with more than 32MB of memory.
         */
-       if (num_physpages > (32 << 20) >> PAGE_SHIFT)
+       if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
                slab_break_gfp_order = BREAK_GFP_ORDER_HI;
 
        /* Bootstrap is tricky, because several objects are allocated
index cb29ae5d33abfce703991cbfebd7bc1956752942..9387f17f99c5e9ff6f9c25c119e8008e60d6860b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -496,7 +496,7 @@ EXPORT_SYMBOL(pagevec_lookup_tag);
  */
 void __init swap_setup(void)
 {
-       unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
+       unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
 
 #ifdef CONFIG_SWAP
        bdi_init(swapper_space.backing_dev_info);
index 0b91e40af9ed9c067a23ceb7dd0245baa0fb786d..f603667f5d027e2b8483fa0fa225084502daf58f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1366,7 +1366,7 @@ void *vmap(struct page **pages, unsigned int count,
 
        might_sleep();
 
-       if (count > num_physpages)
+       if (count > totalram_pages)
                return NULL;
 
        area = get_vm_area_caller((count << PAGE_SHIFT), flags,
@@ -1473,7 +1473,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
        unsigned long real_size = size;
 
        size = PAGE_ALIGN(size);
-       if (!size || (size >> PAGE_SHIFT) > num_physpages)
+       if (!size || (size >> PAGE_SHIFT) > totalram_pages)
                return NULL;
 
        area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
index dd120d8cc8e62d2b63b0b91dcdddace13bff4c1a..a8c9b14f07eda22c3498e187d09a6c75ef23de33 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1196,12 +1196,12 @@ EXPORT_SYMBOL_GPL(sk_setup_caps);
 
 void __init sk_init(void)
 {
-       if (num_physpages <= 4096) {
+       if (totalram_pages <= 4096) {
                sysctl_wmem_max = 32767;
                sysctl_rmem_max = 32767;
                sysctl_wmem_default = 32767;
                sysctl_rmem_default = 32767;
-       } else if (num_physpages >= 131072) {
+       } else if (totalram_pages >= 131072) {
                sysctl_wmem_max = 131071;
                sysctl_rmem_max = 131071;
        }
index 1bca9205104e6c5ff41b4e3562825f9cf1079e9b..d9c44d82e86e1593a5db6734539229d8b4f60bd5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1049,10 +1049,10 @@ static int __init dccp_init(void)
         *
         * The methodology is similar to that of the buffer cache.
         */
-       if (num_physpages >= (128 * 1024))
-               goal = num_physpages >> (21 - PAGE_SHIFT);
+       if (totalram_pages >= (128 * 1024))
+               goal = totalram_pages >> (21 - PAGE_SHIFT);
        else
-               goal = num_physpages >> (23 - PAGE_SHIFT);
+               goal = totalram_pages >> (23 - PAGE_SHIFT);
 
        if (thash_entries)
                goal = (thash_entries *
index 1d6ca8a98dc62bf4f6e6660e4b3662393089d638..44d0d3bd2914f1725c903e78da3d229f1eb7fc80 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1750,7 +1750,7 @@ void __init dn_route_init(void)
        dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
        add_timer(&dn_route_timer);
 
-       goal = num_physpages >> (26 - PAGE_SHIFT);
+       goal = totalram_pages >> (26 - PAGE_SHIFT);
 
        for(order = 0; (1UL << order) < goal; order++)
                /* NOTHING */;
index 278f46f5011beb2ab85747543f84dfd3ce7c6d1c..62ccdf12f5f000259fb62392749d23b28a271c05 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3412,7 +3412,7 @@ int __init ip_rt_init(void)
                alloc_large_system_hash("IP route cache",
                                        sizeof(struct rt_hash_bucket),
                                        rhash_entries,
-                                       (num_physpages >= 128 * 1024) ?
+                                       (totalram_pages >= 128 * 1024) ?
                                        15 : 17,
                                        0,
                                        &rt_hash_log,
index 91145244ea630777e7e4a2e7f112f3c31bc7f90e..33ed849d371563173f5786e89479ec2628d31e7d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2862,7 +2862,7 @@ void __init tcp_init(void)
                alloc_large_system_hash("TCP established",
                                        sizeof(struct inet_ehash_bucket),
                                        thash_entries,
-                                       (num_physpages >= 128 * 1024) ?
+                                       (totalram_pages >= 128 * 1024) ?
                                        13 : 15,
                                        0,
                                        &tcp_hashinfo.ehash_size,
@@ -2879,7 +2879,7 @@ void __init tcp_init(void)
                alloc_large_system_hash("TCP bind",
                                        sizeof(struct inet_bind_hashbucket),
                                        tcp_hashinfo.ehash_size,
-                                       (num_physpages >= 128 * 1024) ?
+                                       (totalram_pages >= 128 * 1024) ?
                                        13 : 15,
                                        0,
                                        &tcp_hashinfo.bhash_size,
index 4299db786d4dee6b3b0960390c7e8a1cea0ee1a9..e7407e5bfacc1fb06865dd7a7674d8c7faa9849c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1245,9 +1245,9 @@ static int nf_conntrack_init_init_net(void)
         * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
        if (!nf_conntrack_htable_size) {
                nf_conntrack_htable_size
-                       = (((num_physpages << PAGE_SHIFT) / 16384)
+                       = (((totalram_pages << PAGE_SHIFT) / 16384)
                           / sizeof(struct hlist_head));
-               if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+               if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 16384;
                if (nf_conntrack_htable_size < 32)
                        nf_conntrack_htable_size = 32;
index 025d1a0af78b43c14a38cc0c5b406b36dd5887f5..0319516b7d3990cfc06a0bbe62286749869d5291 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -617,7 +617,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
        int cpu;
 
        /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-       if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
+       if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
                return NULL;
 
        newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
index 219dcdbe388cb08db07824ab53fe4c02089e5685..dd16e404424fe7610449e8d95c1c809aada1af3f 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -194,9 +194,9 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
        if (minfo->cfg.size)
                size = minfo->cfg.size;
        else {
-               size = ((num_physpages << PAGE_SHIFT) / 16384) /
+               size = ((totalram_pages << PAGE_SHIFT) / 16384) /
                       sizeof(struct list_head);
-               if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+               if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        size = 8192;
                if (size < 16)
                        size = 16;
@@ -266,9 +266,9 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
        if (minfo->cfg.size) {
                size = minfo->cfg.size;
        } else {
-               size = (num_physpages << PAGE_SHIFT) / 16384 /
+               size = (totalram_pages << PAGE_SHIFT) / 16384 /
                       sizeof(struct list_head);
-               if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE)
+               if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
                        size = 8192;
                if (size < 16)
                        size = 16;
index 2936fa3b6dc8f460177033bdbda1e95416cec7f4..8b9a87c87753cdb305cddc57b3e404f1e08909d3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2026,10 +2026,10 @@ static int __init netlink_proto_init(void)
        if (!nl_table)
                goto panic;
 
-       if (num_physpages >= (128 * 1024))
-               limit = num_physpages >> (21 - PAGE_SHIFT);
+       if (totalram_pages >= (128 * 1024))
+               limit = totalram_pages >> (21 - PAGE_SHIFT);
        else
-               limit = num_physpages >> (23 - PAGE_SHIFT);
+               limit = totalram_pages >> (23 - PAGE_SHIFT);
 
        order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
        limit = (1UL << order) / sizeof(struct hlist_head);
index a76da657244a8e38fdb692426857130034da5fd0..4905051ffd8387b6e4822d32119713a5b4e6474e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1186,10 +1186,10 @@ SCTP_STATIC __init int sctp_init(void)
        /* Size and allocate the association hash table.
         * The methodology is similar to that of the tcp hash tables.
         */
-       if (num_physpages >= (128 * 1024))
-               goal = num_physpages >> (22 - PAGE_SHIFT);
+       if (totalram_pages >= (128 * 1024))
+               goal = totalram_pages >> (22 - PAGE_SHIFT);
        else
-               goal = num_physpages >> (24 - PAGE_SHIFT);
+               goal = totalram_pages >> (24 - PAGE_SHIFT);
 
        for (order = 0; (1UL << order) < goal; order++)
                ;