#include <linux/init.h>
#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
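/*
 * A minimal sketch (not part of the patch) of how a free context is
 * handed out from the context_map bitmap above: round-robin from
 * next_mmu_context, claiming the first clear bit.  The function name
 * alloc_context_sketch is hypothetical; the real allocator lives in
 * get_mmu_context() and also tracks a free-context count so that this
 * loop never runs with every bit already set.
 */
static inline mm_context_t alloc_context_sketch(void)
{
	mm_context_t ctx = next_mmu_context;

	/* Claim the first free context at or after next_mmu_context. */
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT + 1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;	/* wrap and keep scanning */
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT; /* assumes 2^n - 1 */
	return ctx;
}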
/*
* Initialize the context management stuff.
*/
-void __init mmu_context_init(void)
+void __init
+mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * kernel use; delete_context skips over context 0 and marks it
	 * as used for kernel purposes.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
}

/*
 * Steal a context from a task that has one at the moment.  This isn't
 * an LRU system; it just frees up each context in turn (sort-of
 * pseudo-random replacement), but it would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 * -- paulus
 */
-void steal_context(void)
+void
+steal_context(void)
{
struct mm_struct *mm;
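	/*
	 * A minimal sketch (assumptions, not the patch itself) of the
	 * round-robin steal described above, given a context_mm[] table
	 * mapping a context number back to the mm currently using it:
	 *
	 *	mm = context_mm[next_mmu_context];
	 *	flush_tlb_mm(mm);	 flush the victim's translations
	 *	destroy_context(mm);	 hand its context number back
	 */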
extern void __flush_dcache_icache(void *page_va);
extern void __flush_dcache_icache_phys(unsigned long physaddr);
-#endif _PPC_CACHEFLUSH_H
-#endif __KERNEL__
+#endif /* _PPC_CACHEFLUSH_H */
+#endif /* __KERNEL__ */
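/*
 * Why the #endif lines above (and below) gain comments: ISO C permits
 * no tokens after #endif, so gcc emits "warning: extra tokens at end
 * of #endif directive" for the bare guard name.  Putting the guard
 * name inside a comment keeps the annotation without the warning:
 *
 *	#endif _PPC_CACHEFLUSH_H	<-- warns
 *	#endif // _PPC_CACHEFLUSH_H	<-- clean
 */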
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct mm_struct *mm,
+static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ __tlbia(); }
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
	{ __tlbia(); }
-#endif _PPC_TLBFLUSH_H
-#endif __KERNEL__
+#endif /* _PPC_TLBFLUSH_H */
+#endif /* __KERNEL__ */
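/*
 * Caller-side effect of the flush_tlb_range() change above: the range
 * flush now takes the vma instead of the bare mm, so an architecture
 * can look at vma->vm_flags for finer-grained flushing as well as
 * reach the mm via vma->vm_mm.  A hedged before/after sketch of a
 * generic caller:
 *
 *	flush_tlb_range(vma->vm_mm, start, end);	old interface
 *	flush_tlb_range(vma, start, end);		new interface
 */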