default y
config SWAP
- bool
+ bool "Support for paging of anonymous memory"
default y
+ help
+ This option allows you to choose whether you want to have support
+ for so-called swap devices or swap files in your kernel that are
+ used to provide more virtual memory than the actual RAM present
+ in your computer. If unsure, say Y.
config SBUS
bool
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/pagemap.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#ifdef __KERNEL__
-struct sysinfo;
struct address_space;
-struct zone;
+struct pte_chain;
+struct sysinfo;
struct writeback_control;
+struct zone;
/*
* A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
/* Swap 50% full? Release swapcache more aggressively.. */
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
+/* linux/mm/oom_kill.c */
+extern void out_of_memory(void);
+
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalhigh_pages;
extern unsigned int nr_free_buffer_pages(void);
extern unsigned int nr_free_pagecache_pages(void);
-/* linux/mm/filemap.c */
-extern void FASTCALL(mark_page_accessed(struct page *));
-
/* linux/mm/swap.c */
extern void FASTCALL(lru_cache_add(struct page *));
extern void FASTCALL(lru_cache_add_active(struct page *));
extern void FASTCALL(activate_page(struct page *));
+extern void FASTCALL(mark_page_accessed(struct page *));
extern void lru_add_drain(void);
extern int rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
extern int shrink_all_memory(int);
extern int vm_swappiness;
-/* linux/mm/oom_kill.c */
-extern void out_of_memory(void);
-
/* linux/mm/rmap.c */
-struct pte_chain;
+#ifdef CONFIG_MMU
int FASTCALL(page_referenced(struct page *));
struct pte_chain *FASTCALL(page_add_rmap(struct page *, pte_t *,
struct pte_chain *));
/* linux/mm/shmem.c */
extern int shmem_unuse(swp_entry_t entry, struct page *page);
+#else
+#define page_referenced(page) \
+ TestClearPageReferenced(page)
+#endif /* CONFIG_MMU */
+
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct file *, struct page *);
page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr), 0);
-#define page_referenced(page) \
- TestClearPageReferenced(page)
#define show_swap_cache_info() /*NOTHING*/
#define free_swap_and_cache(swp) /*NOTHING*/
* Right now other parts of the system means that we
* _really_ don't want to cluster much more
*/
- init_MUTEX(&swapper_space.i_shared_sem);
}
extern struct address_space_operations swap_aops;
struct address_space swapper_space = {
- .page_tree = RADIX_TREE_INIT(GFP_ATOMIC),
- .page_lock = RW_LOCK_UNLOCKED,
- .clean_pages = LIST_HEAD_INIT(swapper_space.clean_pages),
- .dirty_pages = LIST_HEAD_INIT(swapper_space.dirty_pages),
- .io_pages = LIST_HEAD_INIT(swapper_space.io_pages),
- .locked_pages = LIST_HEAD_INIT(swapper_space.locked_pages),
- .host = &swapper_inode,
- .a_ops = &swap_aops,
- .backing_dev_info = &swap_backing_dev_info,
- .i_mmap = LIST_HEAD_INIT(swapper_space.i_mmap),
- .i_mmap_shared = LIST_HEAD_INIT(swapper_space.i_mmap_shared),
- .private_lock = SPIN_LOCK_UNLOCKED,
- .private_list = LIST_HEAD_INIT(swapper_space.private_list),
+ .page_tree = RADIX_TREE_INIT(GFP_ATOMIC),
+ .page_lock = RW_LOCK_UNLOCKED,
+ .clean_pages = LIST_HEAD_INIT(swapper_space.clean_pages),
+ .dirty_pages = LIST_HEAD_INIT(swapper_space.dirty_pages),
+ .io_pages = LIST_HEAD_INIT(swapper_space.io_pages),
+ .locked_pages = LIST_HEAD_INIT(swapper_space.locked_pages),
+ .host = &swapper_inode,
+ .a_ops = &swap_aops,
+ .backing_dev_info = &swap_backing_dev_info,
+ .i_mmap = LIST_HEAD_INIT(swapper_space.i_mmap),
+ .i_mmap_shared = LIST_HEAD_INIT(swapper_space.i_mmap_shared),
+ .i_shared_sem = __MUTEX_INITIALIZER(swapper_space.i_shared_sem),
+ .private_lock = SPIN_LOCK_UNLOCKED,
+ .private_list = LIST_HEAD_INIT(swapper_space.private_list),
};
#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)