[PATCH] 2.5.13: remove VALID_PAGE
author    Roman Zippel <zippel@linux-m68k.org>
          Fri, 3 May 2002 07:03:30 +0000 (00:03 -0700)
committer Russell King <rmk@arm.linux.org.uk>
          Fri, 3 May 2002 07:03:30 +0000 (00:03 -0700)
This patch removes VALID_PAGE(), as the test always came too late for
discontiguous memory configurations. It is replaced with pfn_valid()/
virt_addr_valid(), which test the original input value instead.
Other helper functions (the conversion idiom is sketched below):
pte_pfn() - extract the page frame number from a pte
pfn_to_page()/page_to_pfn() - convert a page frame number to/from a page struct
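
A minimal sketch of that idiom (example() and use() are hypothetical,
for illustration only; the helpers are the ones named above):

	static void example(pte_t pte)
	{
		unsigned long pfn = pte_pfn(pte);	/* original input value */
		struct page *page;

		/* Old, broken on discontiguous memory:
		 *
		 *	page = pte_page(pte);
		 *	if (VALID_PAGE(page) && !PageReserved(page))
		 *		use(page);
		 *
		 * pte_page() derives the struct page pointer first, so
		 * VALID_PAGE() tests the derived pointer, not the pfn.
		 */

		/* New: test the page frame number first, convert second. */
		if (!pfn_valid(pfn))
			return;
		page = pfn_to_page(pfn);
		if (!PageReserved(page))
			use(page);
	}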

24 files changed:
arch/arm/mach-arc/small_page.c
arch/arm/mm/fault-armv.c
arch/ia64/mm/init.c
arch/mips/mm/umap.c
arch/mips64/mm/umap.c
arch/sh/mm/fault.c
arch/sparc/mm/generic.c
arch/sparc/mm/sun4c.c
arch/sparc64/kernel/traps.c
arch/sparc64/mm/generic.c
arch/sparc64/mm/init.c
fs/proc/array.c
include/asm-cris/processor.h
include/asm-i386/page.h
include/asm-i386/pgtable-2level.h
include/asm-i386/pgtable-3level.h
include/asm-m68k/processor.h
include/asm-sh/pgalloc.h
mm/memory.c
mm/msync.c
mm/page_alloc.c
mm/slab.c
mm/vmalloc.c
mm/vmscan.c

index a848bd68fd972bca92bebd2281637fc4436162cb..39efa5551b088d0d7da11a7a0dc5435f90ef6c21 100644 (file)
--- a/arch/arm/mach-arc/small_page.c
+++ b/arch/arm/mach-arc/small_page.c
@@ -150,8 +150,8 @@ static void __free_small_page(unsigned long spage, struct order *order)
        unsigned long flags;
        struct page *page;
 
-       page = virt_to_page(spage);
-       if (VALID_PAGE(page)) {
+       if (virt_addr_valid(spage)) {
+               page = virt_to_page(spage);
 
                /*
                 * The container-page must be marked Reserved
index d06bdb302b6d125841dfd6f1c2e38cee8ac0bc74..dea4ccc1e60409a509e2dbef6c5f80991c789916 100644 (file)
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -240,9 +240,13 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page)
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-       struct page *page = pte_page(pte);
+       unsigned long pfn = pte_pfn(pte);
+       struct page *page;
 
-       if (VALID_PAGE(page) && page->mapping) {
+       if (!pfn_valid(pfn))
+               return;
+       page = pfn_to_page(pfn);
+       if (page->mapping) {
                if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
                        __flush_dcache_page(page);
 
index d479a43d188bbbf62d9d4bc47f8734c956cf4912..beb0bcb9b78e978b4d018fe489d5c9895d440a10 100644 (file)
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -109,6 +109,7 @@ free_initmem (void)
 void
 free_initrd_mem (unsigned long start, unsigned long end)
 {
+       struct page *page;
        /*
         * EFI uses 4KB pages while the kernel can use 4KB  or bigger.
         * Thus EFI and the kernel may have different page sizes. It is
@@ -147,11 +148,12 @@ free_initrd_mem (unsigned long start, unsigned long end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
 
        for (; start < end; start += PAGE_SIZE) {
-               if (!VALID_PAGE(virt_to_page(start)))
+               if (!virt_addr_valid(start))
                        continue;
-               clear_bit(PG_reserved, &virt_to_page(start)->flags);
-               set_page_count(virt_to_page(start), 1);
-               free_page(start);
+               page = virt_to_page(start);
+               clear_bit(PG_reserved, &page->flags);
+               set_page_count(page, 1);
+               __free_page(page);
                ++totalram_pages;
        }
 }
index 8b9a4cac785704b4720d4c630aae7683058126ff..e053fb5ef8eea759087f195efafc2109f8b5fb68 100644 (file)
--- a/arch/mips/mm/umap.c
+++ b/arch/mips/mm/umap.c
@@ -116,8 +116,12 @@ void *vmalloc_uncached (unsigned long size)
 static inline void free_pte(pte_t page)
 {
        if (pte_present(page)) {
-               struct page *ptpage = pte_page(page);
-               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+               unsigned long pfn = pte_pfn(page);
+               struct page *ptpage;
+               if (!pfn_valid(pfn))
+                       return;
+               ptpage = pfn_to_page(pfn);
+               if (PageReserved(ptpage))
                        return;
                __free_page(ptpage);
                if (current->mm->rss <= 0)
index 4fdd3d24010ed80bde8c39e886e3c5df1e1039f3..44726e721a6bf9f5290c18be2c1e062547db3fbf 100644 (file)
--- a/arch/mips64/mm/umap.c
+++ b/arch/mips64/mm/umap.c
@@ -115,8 +115,12 @@ void *vmalloc_uncached (unsigned long size)
 static inline void free_pte(pte_t page)
 {
        if (pte_present(page)) {
-               struct page *ptpage = pte_page(page);
-               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+               unsigned long pfn = pte_pfn(page);
+               struct page *ptpage;
+               if (!pfn_valid(pfn))
+                       return;
+               ptpage = pfn_to_page(pfn);
+               if (PageReserved(ptpage))
                        return;
                __free_page(ptpage);
                if (current->mm->rss <= 0)
index a169b0bedca8f0bdbecf4ea8dcdefa7569c6b4db..97c716854a977234619f04b47f84d9aaaf065298 100644 (file)
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -290,6 +290,7 @@ void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long vpn;
 #if defined(__SH4__)
        struct page *page;
+       unsigned long pfn;
        unsigned long ptea;
 #endif
 
@@ -298,11 +299,14 @@ void update_mmu_cache(struct vm_area_struct * vma,
                return;
 
 #if defined(__SH4__)
-       page = pte_page(pte);
-       if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
-               unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-               __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-               __set_bit(PG_mapped, &page->flags);
+       pfn = pte_pfn(pte);
+       if (pfn_valid(pfn)) {
+               page = pfn_to_page(pfn);
+               if (!test_bit(PG_mapped, &page->flags)) {
+                       unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+                       __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
+                       __set_bit(PG_mapped, &page->flags);
+               }
        }
 #endif
 
index 09d4247ac15c3c43d9da62bc57661a363d6b9412..5b2be11fd2fd33c3a65fd18f1f56fa0ddf10f870 100644 (file)
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -19,8 +19,12 @@ static inline void forget_pte(pte_t page)
        if (pte_none(page))
                return;
        if (pte_present(page)) {
-               struct page *ptpage = pte_page(page);
-               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+               unsigned long pfn = pte_pfn(page);
+               struct page *ptpage;
+               if (!pfn_valid(pfn))
+                       return;
+               ptpage = pfn_to_page(pfn);
+               if (PageReserved(ptpage))
                        return;
                page_cache_release(ptpage);
                return;
index 65836301aede4ec6af9e2f6e496988be77ffd48e..6e25abaa985d158e751a6fdbad7a092f0b079edc 100644 (file)
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1327,7 +1327,7 @@ static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus
        unsigned long page;
 
        page = ((unsigned long)bufptr) & PAGE_MASK;
-       if (!VALID_PAGE(virt_to_page(page))) {
+       if (!virt_addr_valid(page)) {
                sun4c_flush_page(page);
                return (__u32)bufptr; /* already locked */
        }
@@ -2106,7 +2106,7 @@ static void sun4c_pte_clear(pte_t *ptep)  { *ptep = __pte(0); }
 static int sun4c_pmd_bad(pmd_t pmd)
 {
        return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
-               (!VALID_PAGE(virt_to_page(pmd_val(pmd)))));
+               (!virt_addr_valid(pmd_val(pmd))));
 }
 
 static int sun4c_pmd_present(pmd_t pmd)
index 9027a0d1d1b0de3b1653106b3429ef5bb33139fc..6af4276b269c8c8cec191e9f8848e2a91041fccd 100644 (file)
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1312,10 +1312,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
                        }
 
                        if (recoverable) {
-                               struct page *page = virt_to_page(__va(afar));
-
-                               if (VALID_PAGE(page))
-                                       get_page(page);
+                               if (pfn_valid(afar >> PAGE_SHIFT))
+                                       get_page(pfn_to_page(afar >> PAGE_SHIFT));
                                else
                                        recoverable = 0;
 
index fcf742b5472765a29b0bd3f7b4d59053952eb7c2..266c51b450da1ac7d342ce6986626e4ae252e30e 100644 (file)
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -20,8 +20,12 @@ static inline void forget_pte(pte_t page)
        if (pte_none(page))
                return;
        if (pte_present(page)) {
-               struct page *ptpage = pte_page(page);
-               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+               unsigned long pfn = pte_pfn(page);
+               struct page *ptpage;
+               if (!pfn_valid(pfn))
+                       return;
+               ptpage = pfn_to_page(pfn);
+               if (PageReserved(ptpage))
                        return;
                page_cache_release(ptpage);
                return;
index 2fadfa3b040ad5c956a6c41aef235ee80de8f0c5..d6a092dc0f1c028b8e5aee52de663cc9dcf9da4b 100644 (file)
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -187,11 +187,13 @@ extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long addre
 
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
-       struct page *page = pte_page(pte);
+       struct page *page;
+       unsigned long pfn;
        unsigned long pg_flags;
 
-       if (VALID_PAGE(page) &&
-           page->mapping &&
+       pfn = pte_pfn(pte);
+       if (pfn_valid(pfn) &&
+           (page = pfn_to_page(pfn), page->mapping) &&
            ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
                int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
 
@@ -260,10 +262,14 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
                        continue;
 
                if (pte_present(pte) && pte_dirty(pte)) {
-                       struct page *page = pte_page(pte);
+                       struct page *page;
                        unsigned long pgaddr, uaddr;
+                       unsigned long pfn = pte_pfn(pte);
 
-                       if (!VALID_PAGE(page) || PageReserved(page) || !page->mapping)
+                       if (!pfn_valid(pfn))
+                               continue;
+                       page = pfn_to_page(pfn);
+                       if (PageReserved(page) || !page->mapping)
                                continue;
                        pgaddr = (unsigned long) page_address(page);
                        uaddr = address + offset;
index 28c6a3d4a3ef02f363e9be29913cc65d7275d13c..c727d1487a1ef82f7ef369d645f79307a882c55d 100644 (file)
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -416,6 +416,7 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
        do {
                pte_t page = *pte;
                struct page *ptpage;
+               unsigned long pfn;
 
                address += PAGE_SIZE;
                pte++;
@@ -424,8 +425,11 @@ static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned
                ++*total;
                if (!pte_present(page))
                        continue;
-               ptpage = pte_page(page);
-               if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+               pfn = pte_pfn(page);
+               if (!pfn_valid(pfn))
+                       continue;
+               ptpage = pfn_to_page(pfn);
+               if (PageReserved(ptpage))
                        continue;
                ++*pages;
                if (pte_dirty(page))
index 7fba87a5aa1bc33afe530bd9d4ae204f18c6c7e5..bfa6e9ff3e83fa48dbf65b9b5a55a9519b4d7aa8 100644 (file)
--- a/include/asm-cris/processor.h
+++ b/include/asm-cris/processor.h
@@ -102,7 +102,7 @@ unsigned long get_wchan(struct task_struct *p);
         unsigned long eip = 0;   \
         unsigned long regs = (unsigned long)user_regs(tsk); \
         if (regs > PAGE_SIZE && \
-            VALID_PAGE(virt_to_page(regs))) \
+            virt_addr_valid(regs)) \
               eip = ((struct pt_regs *)regs)->irp; \
         eip; })
 
index 90424fd6f6543d043f6e07ff4656c3dbee50e85c..0089c8a9739f6682f5102f875760c96925b0e9a3 100644 (file)
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -131,8 +131,12 @@ static __inline__ int get_order(unsigned long size)
 #define MAXMEM                 ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
 #define __pa(x)                        ((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define virt_to_page(kaddr)    (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
-#define VALID_PAGE(page)       ((page - mem_map) < max_mapnr)
+#define pfn_to_page(pfn)       (mem_map + (pfn))
+#define page_to_pfn(page)      ((unsigned long)((page) - mem_map))
+#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define pfn_valid(pfn)         ((pfn) < max_mapnr)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #define VM_DATA_DEFAULT_FLAGS  (VM_READ | VM_WRITE | VM_EXEC | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
index 0c3f8ae5207877765c396b57b7b3b90d0673d362..2cb68902c5328b8b518ac30d874f147ba231b7c9 100644 (file)
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -56,8 +56,9 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 }
 #define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0))
 #define pte_same(a, b)         ((a).pte_low == (b).pte_low)
-#define pte_page(x)            (mem_map+((unsigned long)(((x).pte_low >> PAGE_SHIFT))))
+#define pte_page(x)            pfn_to_page(pte_pfn(x))
 #define pte_none(x)            (!(x).pte_low)
+#define pte_pfn(x)             ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 #define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
 
 #endif /* _I386_PGTABLE_2LEVEL_H */
index 3fe1853898321e0a79d61fd35fea9ab99f4a4fb3..7650f0f4a3b772c9aeaa5853f7d0be1bdc97870b 100644 (file)
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -86,8 +86,9 @@ static inline int pte_same(pte_t a, pte_t b)
        return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
 }
 
-#define pte_page(x)    (mem_map+(((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT))))
+#define pte_page(x)    pfn_to_page(pte_pfn(x))
 #define pte_none(x)    (!(x).pte_low && !(x).pte_high)
+#define pte_pfn(x)     (((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT)))
 
 static inline pte_t __mk_pte(unsigned long page_nr, pgprot_t pgprot)
 {
index 8d976bf2036f45c2895d855f4203acf7a23f45e9..7ba60318ce2862c0d8b59d66787e4fb2d86fb13b 100644 (file)
--- a/include/asm-m68k/processor.h
+++ b/include/asm-m68k/processor.h
@@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *p);
     ({                 \
        unsigned long eip = 0;   \
        if ((tsk)->thread.esp0 > PAGE_SIZE && \
-           (VALID_PAGE(virt_to_page((tsk)->thread.esp0)))) \
+           (virt_addr_valid((tsk)->thread.esp0))) \
              eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
        eip; })
 #define        KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
index b6753c97397c9846472bc073eeb0bd362d0bc354..9cc5a7dc98ed12b8fd26cfa621ab4d2cd714d605 100644 (file)
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -105,10 +105,13 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
 
        pte_clear(ptep);
        if (!pte_not_present(pte)) {
-               struct page *page = pte_page(pte);
-               if (VALID_PAGE(page)&&
-                   (!page->mapping || !(page->mapping->i_mmap_shared)))
-                       __clear_bit(PG_mapped, &page->flags);
+               struct page *page;
+               unsigned long pfn = pte_pfn(pte);
+               if (pfn_valid(pfn)) {
+                       page = pfn_to_page(pfn);
+                       if (!page->mapping || !page->mapping->i_mmap_shared)
+                               __clear_bit(PG_mapped, &page->flags);
+               }
        }
        return pte;
 }
index 53a8799bc4f84b50431d5b8ddfc640b524e4d413..94f7faf1c717c452b022916e82c4308d0fb74a5b 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -76,8 +76,12 @@ mem_map_t * mem_map;
  */
 void __free_pte(pte_t pte)
 {
-       struct page *page = pte_page(pte);
-       if ((!VALID_PAGE(page)) || PageReserved(page))
+       struct page *page;
+       unsigned long pfn = pte_pfn(pte);
+       if (!pfn_valid(pfn))
+               return;
+       page = pfn_to_page(pfn);
+       if (PageReserved(page))
                return;
        if (pte_dirty(pte))
                set_page_dirty(page);           
@@ -269,6 +273,7 @@ skip_copy_pte_range:                address = (address + PMD_SIZE) & PMD_MASK;
                        do {
                                pte_t pte = *src_pte;
                                struct page *ptepage;
+                               unsigned long pfn;
                                
                                /* copy_one_pte */
 
@@ -278,9 +283,11 @@ skip_copy_pte_range:               address = (address + PMD_SIZE) & PMD_MASK;
                                        swap_duplicate(pte_to_swp_entry(pte));
                                        goto cont_copy_pte_range;
                                }
-                               ptepage = pte_page(pte);
-                               if ((!VALID_PAGE(ptepage)) || 
-                                   PageReserved(ptepage))
+                               pfn = pte_pfn(pte);
+                               if (!pfn_valid(pfn))
+                                       goto cont_copy_pte_range;
+                               ptepage = pfn_to_page(pfn);
+                               if (PageReserved(ptepage))
                                        goto cont_copy_pte_range;
 
                                /* If it's a COW mapping, write protect it both in the parent and the child */
@@ -356,9 +363,13 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long ad
                if (pte_none(pte))
                        continue;
                if (pte_present(pte)) {
-                       struct page *page = pte_page(pte);
-                       if (VALID_PAGE(page) && !PageReserved(page))
-                               freed ++;
+                       struct page *page;
+                       unsigned long pfn = pte_pfn(pte);
+                       if (pfn_valid(pfn)) {
+                               page = pfn_to_page(pfn);
+                               if (!PageReserved(page))
+                                       freed++;
+                       }
                        /* This will eventually call __free_pte on the pte. */
                        tlb_remove_page(tlb, ptep, address + offset);
                } else {
@@ -451,6 +462,7 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *ptep, pte;
+       unsigned long pfn;
 
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
@@ -472,8 +484,11 @@ static struct page * follow_page(struct mm_struct *mm, unsigned long address, in
        preempt_enable();
        if (pte_present(pte)) {
                if (!write ||
-                   (pte_write(pte) && pte_dirty(pte)))
-                       return pte_page(pte);
+                   (pte_write(pte) && pte_dirty(pte))) {
+                       pfn = pte_pfn(pte);
+                       if (pfn_valid(pfn))
+                               return pfn_to_page(pfn);
+               }
        }
 
 out:
@@ -488,8 +503,6 @@ out:
 
 static inline struct page * get_page_map(struct page *page)
 {
-       if (!VALID_PAGE(page))
-               return 0;
        return page;
 }
 
@@ -860,11 +873,10 @@ static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned
                end = PMD_SIZE;
        do {
                struct page *page;
-               pte_t oldpage;
-               oldpage = ptep_get_and_clear(pte);
+               pte_t oldpage = ptep_get_and_clear(pte);
+               unsigned long pfn = phys_addr >> PAGE_SHIFT;
 
-               page = virt_to_page(__va(phys_addr));
-               if ((!VALID_PAGE(page)) || PageReserved(page))
+               if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
                        set_pte(pte, mk_pte_phys(phys_addr, prot));
                forget_pte(oldpage);
                address += PAGE_SIZE;
@@ -977,10 +989,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
        unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
 {
        struct page *old_page, *new_page;
+       unsigned long pfn = pte_pfn(pte);
 
-       old_page = pte_page(pte);
-       if (!VALID_PAGE(old_page))
+       if (!pfn_valid(pfn))
                goto bad_wp_page;
+       old_page = pfn_to_page(pfn);
 
        if (!TestSetPageLocked(old_page)) {
                int reuse = can_share_swap_page(old_page);
index f292e0d27a51ff434df1e6e582bb0fac4e745cc3..2a2b31de8957e0b69cf8d9ea45cf2f0eb92e5705 100644 (file)
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -26,10 +26,14 @@ static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
        pte_t pte = *ptep;
 
        if (pte_present(pte) && pte_dirty(pte)) {
-               struct page *page = pte_page(pte);
-               if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
-                       flush_tlb_page(vma, address);
-                       set_page_dirty(page);
+               struct page *page;
+               unsigned long pfn = pte_pfn(pte);
+               if (pfn_valid(pfn)) {
+                       page = pfn_to_page(pfn);
+                       if (!PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
+                               flush_tlb_page(vma, address);
+                               set_page_dirty(page);
+                       }
                }
        }
        return 0;
index b0a264628f69639b1a08700b527deb67536853fa..f4600396fcd65dca736121e0fc16ee1ab34000b0 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -101,8 +101,6 @@ static void __free_pages_ok (struct page *page, unsigned int order)
                BUG();
        if (page->mapping)
                BUG();
-       if (!VALID_PAGE(page))
-               BUG();
        if (PageLocked(page))
                BUG();
        if (PageLRU(page))
@@ -295,8 +293,6 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
                                                BUG();
                                        if (page->mapping)
                                                BUG();
-                                       if (!VALID_PAGE(page))
-                                               BUG();
                                        if (PageLocked(page))
                                                BUG();
                                        if (PageLRU(page))
@@ -477,8 +473,10 @@ void __free_pages(struct page *page, unsigned int order)
 
 void free_pages(unsigned long addr, unsigned int order)
 {
-       if (addr != 0)
+       if (addr != 0) {
+               BUG_ON(!virt_addr_valid(addr));
                __free_pages(virt_to_page(addr), order);
+       }
 }
 
 /*
index d5e2817db02d802f4e7fac43debfde6d93736642..99b7559849f67da8c581495ee05cdc3b367d9543 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1415,15 +1415,16 @@ alloc_new_slab_nolock:
 #if DEBUG
 # define CHECK_NR(pg)                                          \
        do {                                                    \
-               if (!VALID_PAGE(pg)) {                          \
+               if (!virt_addr_valid(pg)) {                     \
                        printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
                                (unsigned long)objp);           \
                        BUG();                                  \
                } \
        } while (0)
-# define CHECK_PAGE(page)                                      \
+# define CHECK_PAGE(addr)                                      \
        do {                                                    \
-               CHECK_NR(page);                                 \
+               struct page *page = virt_to_page(addr);         \
+               CHECK_NR(addr);                                 \
                if (!PageSlab(page)) {                          \
                        printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
                                (unsigned long)objp);           \
@@ -1439,7 +1440,7 @@ static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
 {
        slab_t* slabp;
 
-       CHECK_PAGE(virt_to_page(objp));
+       CHECK_PAGE(objp);
        /* reduces memory footprint
         *
        if (OPTIMIZE(cachep))
@@ -1519,7 +1520,7 @@ static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
 #ifdef CONFIG_SMP
        cpucache_t *cc = cc_data(cachep);
 
-       CHECK_PAGE(virt_to_page(objp));
+       CHECK_PAGE(objp);
        if (cc) {
                int batchcount;
                if (cc->avail < cc->limit) {
@@ -1601,7 +1602,7 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp)
 {
        unsigned long flags;
 #if DEBUG
-       CHECK_PAGE(virt_to_page(objp));
+       CHECK_PAGE(objp);
        if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
                BUG();
 #endif
@@ -1626,7 +1627,7 @@ void kfree (const void *objp)
        if (!objp)
                return;
        local_irq_save(flags);
-       CHECK_PAGE(virt_to_page(objp));
+       CHECK_PAGE(objp);
        c = GET_PAGE_CACHE(virt_to_page(objp));
        __kmem_cache_free(c, (void*)objp);
        local_irq_restore(flags);
index fa1825cf13cbfd74d4eda9419b0ba38773e1b617..5e269765466a20fb39eecfb3fb6542b2a5bd2552 100644 (file)
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -45,8 +45,12 @@ static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned lo
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
-                       struct page *ptpage = pte_page(page);
-                       if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
+                       struct page *ptpage;
+                       unsigned long pfn = pte_pfn(page);
+                       if (!pfn_valid(pfn))
+                               continue;
+                       ptpage = pfn_to_page(pfn);
+                       if (!PageReserved(ptpage))
                                __free_page(ptpage);
                        continue;
                }
index caa740181adf4658370a70ff85586e8db7f86211..431fdbef682453b9969d0a9e87dbb6d2ea817f97 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -216,9 +216,10 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
 
        do {
                if (pte_present(*pte)) {
-                       struct page *page = pte_page(*pte);
+                       unsigned long pfn = pte_pfn(*pte);
+                       struct page *page = pfn_to_page(pfn);
 
-                       if (VALID_PAGE(page) && !PageReserved(page)) {
+                       if (pfn_valid(pfn) && !PageReserved(page)) {
                                count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
                                if (!count) {
                                        address += PAGE_SIZE;