zones_size[ZONE_DMA] = dma_local_pfn;
zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
}
- free_area_init_node(nid, NODE_DATA(nid), NULL, zones_size, start_pfn<<PAGE_SHIFT, NULL);
+ free_area_init_node(nid, NODE_DATA(nid), NULL, zones_size, start_pfn, NULL);
lmax_mapnr = PLAT_NODE_DATA_STARTNR(nid) + PLAT_NODE_DATA_SIZE(nid);
if (lmax_mapnr > max_mapnr) {
max_mapnr = lmax_mapnr;
totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
lmem_map = NODE_MEM_MAP(nid);
- pfn = NODE_DATA(nid)->node_start_paddr >> PAGE_SHIFT;
+ pfn = NODE_DATA(nid)->node_start_pfn;
for (i = 0; i < PLAT_NODE_DATA_SIZE(nid); i++, pfn++)
if (page_is_ram(pfn) && PageReserved(lmem_map+i))
reservedpages++;
arch_adjust_zones(node, zone_size, zhole_size);
free_area_init_node(node, pgdat, 0, zone_size,
- bdata->node_boot_start, zhole_size);
+ bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
}
/*
* mem_map page array.
*/
- free_area_init_node(0, 0, 0, zones_size, PAGE_OFFSET, 0);
+ free_area_init_node(0, 0, 0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
}
zones_size[ZONE_DMA] = end_pfn + 1 - start_pfn;
free_area_init_node(node, NODE_DATA(node), 0, zones_size,
- start_pfn << PAGE_SHIFT, 0);
+ start_pfn, 0);
if ((PLAT_NODE_DATA_STARTNR(node) +
PLAT_NODE_DATA_SIZE(node)) > pagenr)
pagenr = PLAT_NODE_DATA_STARTNR(node) +
zones_size[ZONE_DMA] = max_dma - start_pfn;
zones_size[ZONE_NORMAL] = low - max_dma;
}
- free_area_init_node(0, NODE_DATA(0), 0, zones_size, __MEMORY_START, 0);
+ free_area_init_node(0, NODE_DATA(0), 0, zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
#ifdef CONFIG_DISCONTIGMEM
zones_size[ZONE_DMA] = __MEMORY_SIZE_2ND >> PAGE_SHIFT;
zones_size[ZONE_NORMAL] = 0;
- free_area_init_node(1, NODE_DATA(1), 0, zones_size, __MEMORY_START_2ND, 0);
+ free_area_init_node(1, NODE_DATA(1), 0, zones_size, __MEMORY_START_2ND >> PAGE_SHIFT, 0);
#endif
}
}
zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
free_area_init_node(0, NULL, NULL, zones_size,
- phys_base, zholes_size);
+ phys_base >> PAGE_SHIFT, zholes_size);
}
/* P3: easy to fix, todo. Current code is utterly broken, though. */
zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
free_area_init_node(0, NULL, NULL, zones_size,
- phys_base, zholes_size);
+ phys_base >> PAGE_SHIFT, zholes_size);
}
cnt = 0;
zholes_size[ZONE_DMA] = npages - pages_avail;
free_area_init_node(0, NULL, NULL, zones_size,
- phys_base, zholes_size);
+ phys_base >> PAGE_SHIFT, zholes_size);
}
device_scan();
#if 1
#define PLAT_NODE_DATA_LOCALNR(p, n) \
- (((p) - PLAT_NODE_DATA(n)->gendata.node_start_paddr) >> PAGE_SHIFT)
+ (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
#else
static inline unsigned long
PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
{
unsigned long temp;
- temp = p - PLAT_NODE_DATA(n)->gendata.node_start_paddr;
- return (temp >> PAGE_SHIFT);
+ temp = p >> PAGE_SHIFT;
+ return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
}
#endif
* and returns the kaddr corresponding to first physical page in the
* node's mem_map.
*/
-#define LOCAL_BASE_ADDR(kaddr) ((unsigned long)__va(NODE_DATA(KVADDR_TO_NID(kaddr))->node_start_paddr))
+#define LOCAL_BASE_ADDR(kaddr) ((unsigned long)__va(NODE_DATA(KVADDR_TO_NID(kaddr))->node_start_pfn << PAGE_SHIFT))
#define LOCAL_MAP_NR(kvaddr) \
(((unsigned long)(kvaddr)-LOCAL_BASE_ADDR(kvaddr)) >> PAGE_SHIFT)
#define PAGE_TO_PA(page) ((page - mem_map) << PAGE_SHIFT)
#else
#define PAGE_TO_PA(page) \
- ((((page)-(page)->zone->zone_mem_map) << PAGE_SHIFT) \
- + (page)->zone->zone_start_paddr)
+ ((( (page) - (page)->zone->zone_mem_map ) \
+ + (page)->zone->zone_start_pfn) << PAGE_SHIFT)
#endif
#ifndef CONFIG_DISCONTIGMEM
unsigned long pfn; \
\
pfn = ((unsigned long)((page)-(page)->zone->zone_mem_map)) << 32; \
- pfn += (page)->zone->zone_start_paddr << (32-PAGE_SHIFT); \
+ pfn += (page)->zone->zone_start_pfn << 32; \
pte_val(pte) = pfn | pgprot_val(pgprot); \
\
pte; \
* around in memory.
*/
#define page_to_pfn(page) \
- (((page) - page_zone(page)->zone_mem_map) \
- + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT))
+ (( (page) - page_zone(page)->zone_mem_map) \
+ + page_zone(page)->zone_start_pfn)
#define pfn_to_page(pfn) \
(PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR((pfn) << PAGE_SHIFT))
#define PLAT_NODE_DATA_STARTNR(n) (PLAT_NODE_DATA(n)->gendata.node_start_mapnr)
#define PLAT_NODE_DATA_SIZE(n) (PLAT_NODE_DATA(n)->gendata.node_size)
#define PLAT_NODE_DATA_LOCALNR(p, n) \
- (((p) - PLAT_NODE_DATA(n)->gendata.node_start_paddr) >> PAGE_SHIFT)
+ (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
#define numa_node_id() cputocnode(current->processor)
#define PAGE_TO_PA(page) ((page - mem_map) << PAGE_SHIFT)
#else
#define PAGE_TO_PA(page) \
- ((((page)-(page)->zone->zone_mem_map) << PAGE_SHIFT) \
- + ((page)->zone->zone_start_paddr))
+ (( ((page)-(page)->zone->zone_mem_map) + \
+ (page)->zone->zone_start_pfn) << PAGE_SHIFT)
#endif
#define mk_pte(page, pgprot) \
({ \
(PLAT_NODE_DATA(n)->gendata.node_start_mapnr)
#define PLAT_NODE_DATA_SIZE(n) (PLAT_NODE_DATA(n)->gendata.node_size)
#define PLAT_NODE_DATA_LOCALNR(p, n) \
- (((p) - PLAT_NODE_DATA(n)->gendata.node_start_paddr) >> PAGE_SHIFT)
+ (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
#ifdef CONFIG_DISCONTIGMEM
* node's mem_map.
*/
#define LOCAL_BASE_ADDR(kaddr) \
- ((unsigned long)__va(NODE_DATA(KVADDR_TO_NID(kaddr))->node_start_paddr))
+ ((unsigned long)__va(NODE_DATA(KVADDR_TO_NID(kaddr))->node_start_pfn << PAGE_SHIFT))
#define LOCAL_MAP_NR(kvaddr) \
(((unsigned long)(kvaddr)-LOCAL_BASE_ADDR(kvaddr)) >> PAGE_SHIFT)
#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page) \
((page) - page_zone(page)->zone_mem_map + \
- (page_zone(page)->zone_start_paddr >> PAGE_SHIFT))
+ (page_zone(page)->zone_start_pfn))
#define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn)
#else
#define pfn_to_page(pfn) (mem_map + (pfn))
#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
#define page_address(page) \
- __va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
- + page_zone(page)->zone_start_paddr)
+ __va( ( ((page) - page_zone(page)->zone_mem_map) \
+ + page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
- unsigned long * zones_size, unsigned long zone_start_paddr,
+ unsigned long * zones_size, unsigned long zone_start_pfn,
unsigned long *zholes_size);
extern void mem_init(void);
extern void show_mem(void);
*/
struct pglist_data *zone_pgdat;
struct page *zone_mem_map;
- unsigned long zone_start_paddr;
+ /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
+ unsigned long zone_start_pfn;
unsigned long zone_start_mapnr;
/*
struct page *node_mem_map;
unsigned long *valid_addr_bitmap;
struct bootmem_data *bdata;
- unsigned long node_start_paddr;
+ unsigned long node_start_pfn;
unsigned long node_start_mapnr;
unsigned long node_size;
int node_id;
/*
* is destination page below bounce pfn?
*/
- if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT) < pfn)
+ if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < pfn)
continue;
/*
 * Should be invoked with parameters (0, 0, unsigned long *[], start_pfn).
*/
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
- unsigned long *zones_size, unsigned long zone_start_paddr,
+ unsigned long *zones_size, unsigned long zone_start_pfn,
unsigned long *zholes_size)
{
free_area_init_core(0, &contig_page_data, &mem_map, zones_size,
- zone_start_paddr, zholes_size, pmap);
+ zone_start_pfn, zholes_size, pmap);
}
#endif /* !CONFIG_DISCONTIGMEM */
* Nodes can be initialized parallely, in no particular order.
*/
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
- unsigned long *zones_size, unsigned long zone_start_paddr,
+ unsigned long *zones_size, unsigned long zone_start_pfn,
unsigned long *zholes_size)
{
int i, size = 0;
if (mem_map == NULL)
mem_map = (struct page *)PAGE_OFFSET;
- free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr,
+ free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_pfn,
zholes_size, pmap);
pgdat->node_id = nid;
* - clear the memory bitmaps
*/
void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
- unsigned long *zones_size, unsigned long zone_start_paddr,
+ unsigned long *zones_size, unsigned long zone_start_pfn,
unsigned long *zholes_size, struct page *lmem_map)
{
unsigned long i, j;
unsigned long totalpages, offset, realtotalpages;
const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
- BUG_ON(zone_start_paddr & ~PAGE_MASK);
-
totalpages = 0;
for (i = 0; i < MAX_NR_ZONES; i++) {
unsigned long size = zones_size[i];
}
*gmap = pgdat->node_mem_map = lmem_map;
pgdat->node_size = totalpages;
- pgdat->node_start_paddr = zone_start_paddr;
+ pgdat->node_start_pfn = zone_start_pfn;
pgdat->node_start_mapnr = (lmem_map - mem_map);
pgdat->nr_zones = 0;
zone->zone_mem_map = mem_map + offset;
zone->zone_start_mapnr = offset;
- zone->zone_start_paddr = zone_start_paddr;
+ zone->zone_start_pfn = zone_start_pfn;
- if ((zone_start_paddr >> PAGE_SHIFT) & (zone_required_alignment-1))
+ if ((zone_start_pfn) & (zone_required_alignment-1))
printk("BUG: wrong zone alignment, it will crash\n");
/*
SetPageReserved(page);
INIT_LIST_HEAD(&page->list);
if (j != ZONE_HIGHMEM)
- set_page_address(page, __va(zone_start_paddr));
- zone_start_paddr += PAGE_SIZE;
+ /*
+ * The shift left won't overflow because the
+ * ZONE_NORMAL is below 4G.
+ */
+ set_page_address(page, __va(zone_start_pfn << PAGE_SHIFT));
+ zone_start_pfn++;
}
offset += size;