#include <linux/config.h>
#include <asm/tlbflush.h>
-/* aim for something that fits in the L1 cache */
-#define FREE_PTE_NR 508
+/*
+ * For UP we don't need to worry about TLB flush
+ * and page free order so much..
+ */
+#ifdef CONFIG_SMP
+ #define FREE_PTE_NR 507
+ #define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL)
+#else
+ #define FREE_PTE_NR 1
+ #define tlb_fast_mode(tlb) 1
+#endif
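For context: the generic code is expected to pick between the two modes when the
gather is set up.  A minimal sketch of a tlb_gather_mmu() that would do this --
the smp_num_cpus test and the freed field are assumptions here, not part of this
hunk:

	static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm)
	{
		mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];

		tlb->mm = mm;
		tlb->freed = 0;
		/* only one CPU online: no cross-CPU shootdown needed, use fast mode */
		tlb->nr = smp_num_cpus > 1 ? 0UL : ~0UL;
		return tlb;
	}

With nr preset to ~0UL, tlb_fast_mode() is true and pages are freed as they are
unmapped instead of being batched.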
/* mmu_gather_t is an opaque type used by the mm code for passing around any
 * data needed by arch specific code for tlb_remove_page.  This structure can
 * be per-CPU or per-MM as the page table lock is held for the duration of TLB
 * shootdown.
 */
/* Users of the generic TLB shootdown code must declare this storage space. */
extern mmu_gather_t mmu_gathers[NR_CPUS];
-/* Do me later */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-
/* tlb_gather_mmu
* Return a pointer to an initialized mmu_gather_t.
*/
{
unsigned long nr;
- flush_tlb_mm(tlb->mm);
+ tlb_flush(tlb);
nr = tlb->nr;
- if (nr != ~0UL) {
+ if (!tlb_fast_mode(tlb)) {
unsigned long i;
tlb->nr = 0;
for (i=0; i < nr; i++)
*/
static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
{
- /* Handle the common case fast, first. */\
- if (tlb->nr == ~0UL) {
+ if (tlb_fast_mode(tlb)) {
free_page_and_swap_cache(page);
return;
}
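The batching half of tlb_remove_page() is outside this hunk; roughly, it queues
the page and forces a flush once FREE_PTE_NR pages have piled up (the pages[]
member and the exact tlb_flush_mmu() signature are assumptions about the
surrounding code):

	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		tlb_flush_mmu(tlb, 0, 0);

which is what makes FREE_PTE_NR the upper bound on how many struct page
pointers a gather holds before the TLB is flushed and the pages freed.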
+#ifndef _I386_TLB_H
+#define _I386_TLB_H
+
+/*
+ * x86 doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+
+/*
+ * .. because we flush the whole mm when it
+ * fills up.
+ */
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
#include <asm-generic/tlb.h>
+
+#endif
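Taken together, a caller in the mm code drives the gather roughly as follows
(zap_page_range-style; the unmap_page_range() name and the locking shown are
assumptions about the callers, not part of this patch):

	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm);
	unmap_page_range(tlb, vma, address, end);	/* ends up calling tlb_remove_page() */
	tlb_finish_mmu(tlb, address, end);		/* tlb_flush() + free the batched pages */
	spin_unlock(&mm->page_table_lock);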
pte_clear(ptep);
pfn = pte_pfn(pte);
+ tlb_remove_tlb_entry(tlb, pte, address+offset);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page)) {
if (pte_dirty(pte))
set_page_dirty(page);
+ tlb->freed++;
tlb_remove_page(tlb, page);
}
}
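The new tlb->freed count presumably lets the rss accounting for all the pages
torn down by one gather be done in a single place rather than per pte.  A
sketch of how tlb_finish_mmu() might consume it (the clamping and field names
are assumptions, not taken from this patch):

	static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
	{
		int freed = tlb->freed;
		struct mm_struct *mm = tlb->mm;
		int rss = mm->rss;

		/* don't let rss go negative if the counts ever disagree */
		if (rss < freed)
			freed = rss;
		mm->rss = rss - freed;
		tlb_flush_mmu(tlb, start, end);
	}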