[SPARC64]: Changes to accommodate booting from non-phys_base memory
author    Ben Collins <bcollins@debian.org>
Tue, 27 Jan 2004 16:23:02 +0000 (11:23 -0500)
committer Ben Collins <bmc@hopper.(none)>
Tue, 27 Jan 2004 16:23:02 +0000 (11:23 -0500)
arch/sparc64/kernel/setup.c
arch/sparc64/mm/init.c
include/asm-sparc64/io.h

diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index c6ec788fde48384810f32e5adee15b780c64d32d..0942d15e80656e22b64273c505cf216de0337767 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -45,6 +45,7 @@
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
 #include <asm/timer.h>
+#include <asm/sections.h>
 
 #ifdef CONFIG_IP_PNP
 #include <net/ipconfig.h>
@@ -522,6 +523,22 @@ void __init setup_arch(char **cmdline_p)
        }
        pfn_base = phys_base >> PAGE_SHIFT;
 
+       switch (tlb_type) {
+       default:
+       case spitfire:
+               kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
+               kern_base &= _PAGE_PADDR_SF;
+               break;
+
+       case cheetah:
+       case cheetah_plus:
+               kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
+               kern_base &= _PAGE_PADDR;
+               break;
+       };
+
+       kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
+
        if (!root_flags)
                root_mountflags &= ~MS_RDONLY;
        ROOT_DEV = old_decode_dev(root_dev);
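
The effect of the hunk above is that the kernel's physical load address (kern_base) is read back out of the highest locked TLB entry rather than assumed to equal phys_base, and kern_size covers the image from KERNBASE to _end. A minimal sketch of the resulting address relationship, assuming the usual sparc64 layout; the helper name kernel_va_to_pa() is made up for illustration and is not part of the patch:

/* Illustration only: how a kernel-image virtual address relates to the
 * physical address it is actually mapped at by the locked 4MB entry.
 * Valid for va in [KERNBASE, KERNBASE + kern_size).
 */
static inline unsigned long kernel_va_to_pa(unsigned long va)
{
	return va - (unsigned long)KERNBASE + kern_base;
}

Before this change the same translation implicitly used phys_base, which only holds when the boot loader places the kernel at the very start of physical memory.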
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b42699887bf331b144aa95094f5678d0463b6310..dc0a51a93e8c69ffaebe7c8102266f4d7c42ba8f 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -47,8 +47,19 @@ unsigned long *sparc64_valid_addr_bitmap;
 
 /* Ugly, but necessary... -DaveM */
 unsigned long phys_base;
+unsigned long kern_base;
+unsigned long kern_size;
 unsigned long pfn_base;
 
+/* This is even uglier. We have a problem where the kernel may not be
+ * located at phys_base. However, initial __alloc_bootmem() calls need to
+ * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
+ * those page mappings won't work. Things are ok after inherit_prom_mappings
+ * is called though. Dave says he'll clean this up some other time.
+ * -- BenC
+ */
+static unsigned long bootmap_base;
+
 /* get_new_mmu_context() uses "cache + 1".  */
 spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
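
bootmap_base is later passed as the goal argument of __alloc_bootmem(), which asks the bootmem allocator to place the block at or above that physical address, keeping the very early page-table allocations inside memory already reachable through the kernel's locked mapping. A rough usage sketch, not taken verbatim from the patch (the size of 2048 bytes is just an example):

/* Sketch: steer an early allocation to land at or above bootmap_base
 * (the bootmem bitmap sits just past the kernel image) instead of the
 * old goal of 0UL, which could fall outside the locked kernel mapping
 * when the kernel is not loaded at phys_base.
 */
void *ptr = __alloc_bootmem(2048, SMP_CACHE_BYTES, bootmap_base);
if (ptr == NULL) {
	prom_printf("early allocation failed.\n");
	prom_halt();
}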
@@ -433,7 +444,7 @@ static void inherit_prom_mappings(void)
        n += 5 * sizeof(struct linux_prom_translation);
        for (tsz = 1; tsz < n; tsz <<= 1)
                /* empty */;
-       trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, 0UL);
+       trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
        if (trans == NULL) {
                prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
                prom_halt();
@@ -453,7 +464,7 @@ static void inherit_prom_mappings(void)
         * in inherit_locked_prom_mappings()).
         */
 #define OBP_PMD_SIZE 2048
-       prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, 0UL);
+       prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
        if (prompmd == NULL)
                early_pgtable_allocfail("pmd");
        memset(prompmd, 0, OBP_PMD_SIZE);
@@ -471,7 +482,7 @@ static void inherit_prom_mappings(void)
                                if (pmd_none(*pmdp)) {
                                        ptep = __alloc_bootmem(BASE_PAGE_SIZE,
                                                               BASE_PAGE_SIZE,
-                                                              0UL);
+                                                              bootmap_base);
                                        if (ptep == NULL)
                                                early_pgtable_allocfail("pte");
                                        memset(ptep, 0, BASE_PAGE_SIZE);
@@ -1327,15 +1338,8 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
         * image.  The kernel is hard mapped below PAGE_OFFSET in a
         * 4MB locked TLB translation.
         */
-       start_pfn  = PAGE_ALIGN((unsigned long) _end) -
-               ((unsigned long) KERNBASE);
-
-       /* Adjust up to the physical address where the kernel begins. */
-       start_pfn += phys_base;
+       start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
 
-       /* Now shift down to get the real physical page frame number. */
-       start_pfn >>= PAGE_SHIFT;
-       
        bootmap_pfn = start_pfn;
 
        end_pfn = end_of_phys_memory >> PAGE_SHIFT;
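
For concreteness, a worked example with hypothetical values kern_base = 0x00400000 and kern_size = 0x002a8000, using sparc64's 8KB pages (PAGE_SHIFT = 13):

/* Hypothetical numbers, illustration only:
 *   kern_base + kern_size  = 0x00400000 + 0x002a8000 = 0x006a8000
 *   PAGE_ALIGN(0x006a8000) = 0x006a8000   (already 8KB aligned)
 *   start_pfn              = 0x006a8000 >> 13 = 0x354
 * The removed code would have produced the same value only when
 * phys_base == kern_base.
 */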
@@ -1365,11 +1369,13 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
        min_low_pfn = pfn_base;
 
 #ifdef CONFIG_DEBUG_BOOTMEM
-       prom_printf("init_bootmem(spfn[%lx], bpfn[%lx], mlpfn[%lx])\n",
-                   start_pfn, bootmap_pfn, max_low_pfn);
+       prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
+                   min_low_pfn, bootmap_pfn, max_low_pfn);
 #endif
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
 
+       bootmap_base = bootmap_pfn << PAGE_SHIFT;
+
        /* Now register the available physical memory with the
         * allocator.
         */
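
Continuing the hypothetical example above (and assuming no initrd forces the bootmem map elsewhere), bootmap_pfn equals start_pfn, so the new allocation goal is simply the physical address just past the kernel image:

/* Illustration only, same hypothetical numbers as before:
 *   bootmap_base = bootmap_pfn << PAGE_SHIFT = 0x354 << 13 = 0x006a8000
 */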
@@ -1378,8 +1384,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
                prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
                            i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
 #endif
-               free_bootmem(sp_banks[i].base_addr,
-                            sp_banks[i].num_bytes);
+               free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
        }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1395,12 +1400,11 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
        }
 #endif
        /* Reserve the kernel text/data/bss. */
-       size = (start_pfn << PAGE_SHIFT) - phys_base;
 #ifdef CONFIG_DEBUG_BOOTMEM
-       prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", phys_base, size);
+       prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
 #endif
-       reserve_bootmem(phys_base, size);
-       *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+       reserve_bootmem(kern_base, kern_size);
+       *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
 
        /* Reserve the bootmem map.   We do not account for it
         * in pages_avail because we will release that memory
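
With the same hypothetical numbers, the reservation below now covers exactly the loaded image rather than a range anchored at phys_base:

/* Illustration only:
 *   reserve_bootmem(0x00400000, 0x002a8000);
 *   *pages_avail -= PAGE_ALIGN(0x002a8000) >> 13;   i.e. 0x154 (340) pages
 */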
@@ -1429,7 +1433,7 @@ void __init paging_init(void)
        extern pmd_t swapper_pmd_dir[1024];
        extern unsigned int sparc64_vpte_patchme1[1];
        extern unsigned int sparc64_vpte_patchme2[1];
-       unsigned long alias_base = phys_base + PAGE_OFFSET;
+       unsigned long alias_base = kern_base + PAGE_OFFSET;
        unsigned long second_alias_page = 0;
        unsigned long pt, flags, end_pfn, pages_avail;
        unsigned long shift = alias_base - ((unsigned long)KERNBASE);
@@ -1449,7 +1453,7 @@ void __init paging_init(void)
         * if this were not true we wouldn't boot up to this point
         * anyways.
         */
-       pt  = phys_base | _PAGE_VALID | _PAGE_SZ4MB;
+       pt  = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
        pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
        local_irq_save(flags);
        if (tlb_type == spitfire) {
@@ -1543,7 +1547,16 @@ void __init paging_init(void)
        }
 
        inherit_locked_prom_mappings(1);
-       
+
+#ifdef CONFIG_SUN_SERIAL
+       /* This does not logically belong here, but we need to call it at
+        * the moment we are able to use the bootmem allocator. This _has_
+        * to be done after the prom_mappings above, since
+        * __alloc_bootmem() doesn't work correctly until then.
+        */
+       sun_serial_setup();
+#endif
+
        /* We only created DTLB mapping of this stuff. */
        spitfire_flush_dtlb_nucleus_page(alias_base);
        if (second_alias_page)
@@ -1714,17 +1727,15 @@ void __init mem_init(void)
        i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
        i += 1;
        sparc64_valid_addr_bitmap = (unsigned long *)
-               __alloc_bootmem(i << 3, SMP_CACHE_BYTES, 0UL);
+               __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
        if (sparc64_valid_addr_bitmap == NULL) {
                prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
                prom_halt();
        }
        memset(sparc64_valid_addr_bitmap, 0, i << 3);
 
-       addr = PAGE_OFFSET + phys_base;
-       last = PAGE_ALIGN((unsigned long)_end) -
-               ((unsigned long) KERNBASE);
-       last += PAGE_OFFSET + phys_base;
+       addr = PAGE_OFFSET + kern_base;
+       last = PAGE_ALIGN(kern_size) + addr;
        while (addr < last) {
                set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
                addr += PAGE_SIZE;
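
For reference, the bitmap sized above carries one bit per 4MB (1 << 22) physical chunk, so one 64-bit long covers 256MB. A worked example assuming PAGE_SHIFT = 13 and 1GB of RAM (illustration only):

/* i = last_valid_pfn >> ((22 - 13) + 6) = last_valid_pfn >> 15
 * last_valid_pfn = 0x20000 (1GB / 8KB pages)  ->  i = 4, then i += 1
 * allocation size = i << 3 = 40 bytes (5 longs, covering up to 1.25GB)
 * Each set_bit(__pa(addr) >> 22, ...) marks one 4MB physical chunk
 * of the kernel image as valid.
 */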
@@ -1764,7 +1775,7 @@ void __init mem_init(void)
                /* Put empty_pg_dir on pgd_quicklist */
                extern pgd_t empty_pg_dir[1024];
                unsigned long addr = (unsigned long)empty_pg_dir;
-               unsigned long alias_base = phys_base + PAGE_OFFSET -
+               unsigned long alias_base = kern_base + PAGE_OFFSET -
                        (long)(KERNBASE);
                
                memset(empty_pg_dir, 0, sizeof(empty_pg_dir));
@@ -1800,7 +1811,7 @@ void free_initmem (void)
                struct page *p;
 
                page = (addr +
-                       ((unsigned long) __va(phys_base)) -
+                       ((unsigned long) __va(kern_base)) -
                        ((unsigned long) KERNBASE));
                p = virt_to_page(page);
 
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index 4cf6e15170f012d1e176157fe3092579ea9d012c..0745a2349ace1e8353c6df4f8130a55363a50c5f 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -19,7 +19,7 @@ extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
 #define bus_to_virt bus_to_virt_not_defined_use_pci_map
 
 /* BIO layer definitions. */
-extern unsigned long phys_base;
+extern unsigned long phys_base, kern_base, kern_size;
 #define page_to_phys(page)     ((((page) - mem_map) << PAGE_SHIFT)+phys_base)
 #define BIO_VMERGE_BOUNDARY    8192
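
Note that page_to_phys() continues to use phys_base (the lowest physical address of RAM), while the newly exported kern_base is wherever the boot loader happened to place the kernel image; after this commit the two may differ. A small restatement of the macro as a function, purely for clarity (example_page_to_phys() is not part of the patch):

/* Same arithmetic as the page_to_phys() macro above: page frames are
 * numbered from phys_base, the bottom of physical memory, regardless of
 * where the kernel image itself (kern_base) was loaded.
 */
static inline unsigned long example_page_to_phys(struct page *page)
{
	return ((unsigned long)(page - mem_map) << PAGE_SHIFT) + phys_base;
}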