unsigned long flags;
struct page *page;
- page = virt_to_page(spage);
- if (VALID_PAGE(page)) {
+ if (virt_addr_valid(spage)) {
+ page = virt_to_page(spage);
/*
* The container-page must be marked Reserved
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
- struct page *page = pte_page(pte);
+ unsigned long pfn = pte_pfn(pte);
+ struct page *page;
- if (VALID_PAGE(page) && page->mapping) {
+ if (!pfn_valid(pfn))
+ return;
+ page = pfn_to_page(pfn);
+ if (page->mapping) {
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(page);
void
free_initrd_mem (unsigned long start, unsigned long end)
{
+ struct page *page;
/*
* EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
- if (!VALID_PAGE(virt_to_page(start)))
+ if (!virt_addr_valid(start))
continue;
- clear_bit(PG_reserved, &virt_to_page(start)->flags);
- set_page_count(virt_to_page(start), 1);
- free_page(start);
+ page = virt_to_page(start);
+ clear_bit(PG_reserved, &page->flags);
+ set_page_count(page, 1);
+ __free_page(page);
++totalram_pages;
}
}
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
- struct page *ptpage = pte_page(page);
- if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+ unsigned long pfn = pte_pfn(page);
+ struct page *ptpage;
+ if (!pfn_valid(pfn))
+ return;
+ ptpage = pfn_to_page(pfn);
+ if (PageReserved(ptpage))
return;
__free_page(ptpage);
if (current->mm->rss <= 0)
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
- struct page *ptpage = pte_page(page);
- if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+ unsigned long pfn = pte_pfn(page);
+ struct page *ptpage;
+ if (!pfn_valid(pfn))
+ return;
+ ptpage = pfn_to_page(pfn);
+ if (PageReserved(ptpage))
return;
__free_page(ptpage);
if (current->mm->rss <= 0)
unsigned long vpn;
#if defined(__SH4__)
struct page *page;
+ unsigned long pfn;
unsigned long ptea;
#endif
return;
#if defined(__SH4__)
- page = pte_page(pte);
- if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
- unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
- __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
- __set_bit(PG_mapped, &page->flags);
+ pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (!test_bit(PG_mapped, &page->flags)) {
+ unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+ __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
+ __set_bit(PG_mapped, &page->flags);
+ }
}
#endif
if (pte_none(page))
return;
if (pte_present(page)) {
- struct page *ptpage = pte_page(page);
- if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+ unsigned long pfn = pte_pfn(page);
+ struct page *ptpage;
+ if (!pfn_valid(pfn))
+ return;
+ ptpage = pfn_to_page(pfn);
+ if (PageReserved(ptpage))
return;
page_cache_release(ptpage);
return;
unsigned long page;
page = ((unsigned long)bufptr) & PAGE_MASK;
- if (!VALID_PAGE(virt_to_page(page))) {
+ if (!virt_addr_valid(page)) {
sun4c_flush_page(page);
return (__u32)bufptr; /* already locked */
}
static int sun4c_pmd_bad(pmd_t pmd)
{
return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
- (!VALID_PAGE(virt_to_page(pmd_val(pmd)))));
+ (!virt_addr_valid(pmd_val(pmd))));
}
static int sun4c_pmd_present(pmd_t pmd)
}
if (recoverable) {
- struct page *page = virt_to_page(__va(afar));
-
- if (VALID_PAGE(page))
- get_page(page);
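+ /* afar holds a physical fault address, so afar >> PAGE_SHIFT is its pfn. */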
+ if (pfn_valid(afar >> PAGE_SHIFT))
+ get_page(pfn_to_page(afar >> PAGE_SHIFT));
else
recoverable = 0;
if (pte_none(page))
return;
if (pte_present(page)) {
- struct page *ptpage = pte_page(page);
- if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+ unsigned long pfn = pte_pfn(page);
+ struct page *ptpage;
+ if (!pfn_valid(pfn))
+ return;
+ ptpage = pfn_to_page(pfn);
+ if (PageReserved(ptpage))
return;
page_cache_release(ptpage);
return;
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
- struct page *page = pte_page(pte);
+ struct page *page;
+ unsigned long pfn;
unsigned long pg_flags;
- if (VALID_PAGE(page) &&
- page->mapping &&
+ pfn = pte_pfn(pte);
+ if (pfn_valid(pfn) &&
+ (page = pfn_to_page(pfn), page->mapping) &&
((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
continue;
if (pte_present(pte) && pte_dirty(pte)) {
- struct page *page = pte_page(pte);
+ struct page *page;
unsigned long pgaddr, uaddr;
+ unsigned long pfn = pte_pfn(pte);
- if (!VALID_PAGE(page) || PageReserved(page) || !page->mapping)
+ if (!pfn_valid(pfn))
+ continue;
+ page = pfn_to_page(pfn);
+ if (PageReserved(page) || !page->mapping)
continue;
pgaddr = (unsigned long) page_address(page);
uaddr = address + offset;
do {
pte_t page = *pte;
struct page *ptpage;
+ unsigned long pfn;
address += PAGE_SIZE;
pte++;
++*total;
if (!pte_present(page))
continue;
- ptpage = pte_page(page);
- if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
+ pfn = pte_pfn(page);
+ if (!pfn_valid(pfn))
+ continue;
+ ptpage = pfn_to_page(pfn);
+ if (PageReserved(ptpage))
continue;
++*pages;
if (pte_dirty(page))
unsigned long eip = 0; \
unsigned long regs = (unsigned long)user_regs(tsk); \
if (regs > PAGE_SIZE && \
- VALID_PAGE(virt_to_page(regs))) \
+ virt_addr_valid(regs)) \
eip = ((struct pt_regs *)regs)->irp; \
eip; })
#define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
-#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
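+/*
+ * pfn-based replacements for VALID_PAGE(): a pfn is valid when it indexes
+ * into mem_map, and a kernel virtual address is valid when its __pa()
+ * translation yields a valid pfn.
+ */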
+#define pfn_to_page(pfn) (mem_map + (pfn))
+#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
}
#define ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0))
#define pte_same(a, b) ((a).pte_low == (b).pte_low)
-#define pte_page(x) (mem_map+((unsigned long)(((x).pte_low >> PAGE_SHIFT))))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low)
+#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
#define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#endif /* _I386_PGTABLE_2LEVEL_H */
return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}
-#define pte_page(x) (mem_map+(((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT))))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low && !(x).pte_high)
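+/* With PAE the pfn spans both words: low bits from pte_low, upper bits from pte_high. */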
+#define pte_pfn(x) (((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT)))
static inline pte_t __mk_pte(unsigned long page_nr, pgprot_t pgprot)
{
({ \
unsigned long eip = 0; \
if ((tsk)->thread.esp0 > PAGE_SIZE && \
- (VALID_PAGE(virt_to_page((tsk)->thread.esp0)))) \
+ (virt_addr_valid((tsk)->thread.esp0))) \
eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
eip; })
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
pte_clear(ptep);
if (!pte_not_present(pte)) {
- struct page *page = pte_page(pte);
- if (VALID_PAGE(page)&&
- (!page->mapping || !(page->mapping->i_mmap_shared)))
- __clear_bit(PG_mapped, &page->flags);
+ struct page *page;
+ unsigned long pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (!page->mapping || !page->mapping->i_mmap_shared)
+ __clear_bit(PG_mapped, &page->flags);
+ }
}
return pte;
}
*/
void __free_pte(pte_t pte)
{
- struct page *page = pte_page(pte);
- if ((!VALID_PAGE(page)) || PageReserved(page))
+ struct page *page;
+ unsigned long pfn = pte_pfn(pte);
+ if (!pfn_valid(pfn))
+ return;
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
return;
if (pte_dirty(pte))
set_page_dirty(page);
do {
pte_t pte = *src_pte;
struct page *ptepage;
+ unsigned long pfn;
/* copy_one_pte */
swap_duplicate(pte_to_swp_entry(pte));
goto cont_copy_pte_range;
}
- ptepage = pte_page(pte);
- if ((!VALID_PAGE(ptepage)) ||
- PageReserved(ptepage))
+ pfn = pte_pfn(pte);
+ if (!pfn_valid(pfn))
+ goto cont_copy_pte_range;
+ ptepage = pfn_to_page(pfn);
+ if (PageReserved(ptepage))
goto cont_copy_pte_range;
/* If it's a COW mapping, write protect it both in the parent and the child */
if (pte_none(pte))
continue;
if (pte_present(pte)) {
- struct page *page = pte_page(pte);
- if (VALID_PAGE(page) && !PageReserved(page))
- freed ++;
+ struct page *page;
+ unsigned long pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (!PageReserved(page))
+ freed++;
+ }
/* This will eventually call __free_pte on the pte. */
tlb_remove_page(tlb, ptep, address + offset);
} else {
pgd_t *pgd;
pmd_t *pmd;
pte_t *ptep, pte;
+ unsigned long pfn;
pgd = pgd_offset(mm, address);
if (pgd_none(*pgd) || pgd_bad(*pgd))
preempt_enable();
if (pte_present(pte)) {
if (!write ||
- (pte_write(pte) && pte_dirty(pte)))
- return pte_page(pte);
+ (pte_write(pte) && pte_dirty(pte))) {
+ pfn = pte_pfn(pte);
+ if (pfn_valid(pfn))
+ return pfn_to_page(pfn);
+ }
}
out:
static inline struct page * get_page_map(struct page *page)
{
- if (!VALID_PAGE(page))
- return 0;
return page;
}
end = PMD_SIZE;
do {
- struct page *page;
- pte_t oldpage;
- oldpage = ptep_get_and_clear(pte);
+ pte_t oldpage = ptep_get_and_clear(pte);
+ unsigned long pfn = phys_addr >> PAGE_SHIFT;
- page = virt_to_page(__va(phys_addr));
- if ((!VALID_PAGE(page)) || PageReserved(page))
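+ /* Only install the pte for memory that is not ordinary, freeable RAM. */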
+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
set_pte(pte, mk_pte_phys(phys_addr, prot));
forget_pte(oldpage);
address += PAGE_SIZE;
unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
{
struct page *old_page, *new_page;
+ unsigned long pfn = pte_pfn(pte);
- old_page = pte_page(pte);
- if (!VALID_PAGE(old_page))
+ if (!pfn_valid(pfn))
goto bad_wp_page;
+ old_page = pfn_to_page(pfn);
if (!TestSetPageLocked(old_page)) {
int reuse = can_share_swap_page(old_page);
pte_t pte = *ptep;
if (pte_present(pte) && pte_dirty(pte)) {
- struct page *page = pte_page(pte);
- if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
- flush_tlb_page(vma, address);
- set_page_dirty(page);
+ struct page *page;
+ unsigned long pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (!PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
+ flush_tlb_page(vma, address);
+ set_page_dirty(page);
+ }
}
}
return 0;
BUG();
if (page->mapping)
BUG();
- if (!VALID_PAGE(page))
- BUG();
if (PageLocked(page))
BUG();
if (PageLRU(page))
BUG();
if (page->mapping)
BUG();
- if (!VALID_PAGE(page))
- BUG();
if (PageLocked(page))
BUG();
if (PageLRU(page))
void free_pages(unsigned long addr, unsigned int order)
{
- if (addr != 0)
+ if (addr != 0) {
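+ /* Freeing an address with no struct page behind it is a caller bug. */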
+ BUG_ON(!virt_addr_valid(addr));
__free_pages(virt_to_page(addr), order);
+ }
}
/*
#if DEBUG
# define CHECK_NR(pg) \
do { \
- if (!VALID_PAGE(pg)) { \
+ if (!virt_addr_valid(pg)) { \
printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
} \
} while (0)
-# define CHECK_PAGE(page) \
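+/* CHECK_PAGE() takes the object address and derives its struct page itself. */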
+# define CHECK_PAGE(addr) \
do { \
- CHECK_NR(page); \
+ struct page *page = virt_to_page(addr); \
+ CHECK_NR(addr); \
if (!PageSlab(page)) { \
printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
(unsigned long)objp); \
{
slab_t* slabp;
- CHECK_PAGE(virt_to_page(objp));
+ CHECK_PAGE(objp);
/* reduces memory footprint
*
if (OPTIMIZE(cachep))
#ifdef CONFIG_SMP
cpucache_t *cc = cc_data(cachep);
- CHECK_PAGE(virt_to_page(objp));
+ CHECK_PAGE(objp);
if (cc) {
int batchcount;
if (cc->avail < cc->limit) {
{
unsigned long flags;
#if DEBUG
- CHECK_PAGE(virt_to_page(objp));
+ CHECK_PAGE(objp);
if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
BUG();
#endif
if (!objp)
return;
local_irq_save(flags);
- CHECK_PAGE(virt_to_page(objp));
+ CHECK_PAGE(objp);
c = GET_PAGE_CACHE(virt_to_page(objp));
__kmem_cache_free(c, (void*)objp);
local_irq_restore(flags);
if (pte_none(page))
continue;
if (pte_present(page)) {
- struct page *ptpage = pte_page(page);
- if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
+ struct page *ptpage;
+ unsigned long pfn = pte_pfn(page);
+ if (!pfn_valid(pfn))
+ continue;
+ ptpage = pfn_to_page(pfn);
+ if (!PageReserved(ptpage))
__free_page(ptpage);
continue;
}
do {
if (pte_present(*pte)) {
- struct page *page = pte_page(*pte);
+ unsigned long pfn = pte_pfn(*pte);
+ struct page *page = pfn_to_page(pfn);
- if (VALID_PAGE(page) && !PageReserved(page)) {
+ if (pfn_valid(pfn) && !PageReserved(page)) {
count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
if (!count) {
address += PAGE_SIZE;