#include <linux/config.h>
-#include <linux/linkage.h>
#define page_map_lock(page) \
bit_spin_lock(PG_maplock, (unsigned long *)&(page)->flags)
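The unlock side is the mirror image; a minimal sketch of the counterpart macro, assuming it sits alongside this one and uses bit_spin_unlock on the same flag bit:

	#define page_map_unlock(page) \
		bit_spin_unlock(PG_maplock, (unsigned long *)&(page)->flags)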
#ifdef CONFIG_MMU
-void fastcall page_add_anon_rmap(struct page *,
- struct mm_struct *, unsigned long addr);
-void fastcall page_add_file_rmap(struct page *);
-void fastcall page_remove_rmap(struct page *);
+void page_add_anon_rmap(struct page *, struct mm_struct *, unsigned long);
+void page_add_file_rmap(struct page *);
+void page_remove_rmap(struct page *);
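These three calls are the whole rmap accounting interface: a pte mapping an anonymous page is announced with page_add_anon_rmap, a pte mapping a file page with page_add_file_rmap, and any pte being torn down with page_remove_rmap. A minimal sketch of the calling convention, using a hypothetical helper and assuming the caller holds mm->page_table_lock as the comments below require:

	/* Hypothetical illustration of a fault-path caller, not real mm/ code. */
	static void note_new_anon_pte(struct mm_struct *mm, struct page *page,
				      unsigned long addr)
	{
		/* pte has just been installed; page_table_lock is held */
		page_add_anon_rmap(page, mm, addr);
	}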
/**
* page_dup_rmap - duplicate pte mapping to a page
page_map_unlock(page);
}
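Only the head of the kerneldoc and the tail of the body survive in this excerpt; a plausible reading of the whole inline, assuming it does nothing beyond bumping the map count under the bit spinlock:

	static inline void page_dup_rmap(struct page *page)
	{
		page_map_lock(page);
		page->mapcount++;
		page_map_unlock(page);
	}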
-int fastcall mremap_move_anon_rmap(struct page *page, unsigned long addr);
+int mremap_move_anon_rmap(struct page *page, unsigned long addr);
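mremap_move_anon_rmap lets mremap renote the address in an exclusively mapped anonymous page instead of copying it. A sketch of the intended call site, with hypothetical surrounding code (the real logic would live in mm/mremap.c's page-moving path):

	/* Hypothetical fragment of an mremap page-move path. */
	if (PageAnon(page) && !mremap_move_anon_rmap(page, new_addr)) {
		/* shared page: caller must copy it for the new address */
	}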
/**
* mremap_moved_anon_rmap - does new address clash with that noted?
/*
* Called from mm/vmscan.c to handle paging out
*/
-int fastcall page_referenced(struct page *);
-int fastcall try_to_unmap(struct page *);
+int page_referenced(struct page *);
+int try_to_unmap(struct page *);
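Both entry points are driven from the LRU scan. A condensed, hypothetical rendering of the mm/vmscan.c calling pattern, assuming the SWAP_* return codes documented further down (locking, labels, and list handling elided):

	if (page_referenced(page))
		goto activate_locked;	/* recently used: keep the page */
	switch (try_to_unmap(page)) {
	case SWAP_FAIL:
		goto activate_locked;	/* unswappable */
	case SWAP_AGAIN:
		goto keep_locked;	/* missed a trylock: retry later */
	case SWAP_SUCCESS:
		break;			/* all ptes gone: page can go to swap */
	}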
#else /* !CONFIG_MMU */
}
/*
- * The warning below may appear if page_referenced catches the
- * page in between page_add_{anon,file}_rmap and its replacement
+ * The warning below may appear if page_referenced_anon catches
+ * the page in between page_add_anon_rmap and its replacement
 * demanded by mremap_moved_anon_rmap: so remove the warning once
* we're convinced that anonmm rmap really is finding its pages.
*/
* returns the number of ptes which referenced the page.
* Caller needs to hold the rmap lock.
*/
-int fastcall page_referenced(struct page *page)
+int page_referenced(struct page *page)
{
int referenced = 0;
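The hunk cuts the body short; a sketch of how it plausibly continues, assuming the usual split between clearing the referenced bit on the struct page and the per-pte walks (page_referenced_anon is named in the comment above; page_referenced_file is assumed as its file-backed twin):

	if (TestClearPageReferenced(page))
		referenced++;
	if (page->mapcount && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else
			referenced += page_referenced_file(page);
	}
	return referenced;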
*
* The caller needs to hold the mm->page_table_lock.
*/
-void fastcall page_add_anon_rmap(struct page *page,
+void page_add_anon_rmap(struct page *page,
struct mm_struct *mm, unsigned long address)
{
struct anonmm *anonmm = mm->anonmm;
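The excerpt stops after fetching the anonmm; a sketch of the likely remainder, on the assumption that the first mapper marks the page anonymous and records the faulting address and anonmm in the page (the exact encoding of page->mapping is an assumption):

	page_map_lock(page);
	if (!page->mapcount) {
		SetPageAnon(page);
		page->index = address & PAGE_MASK;
		page->mapping = (void *) anonmm;	/* assumed encoding */
	}
	page->mapcount++;
	page_map_unlock(page);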
*
* The caller needs to hold the mm->page_table_lock.
*/
-void fastcall page_add_file_rmap(struct page *page)
+void page_add_file_rmap(struct page *page)
{
BUG_ON(PageAnon(page));
if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
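The remainder of page_add_file_rmap is probably nothing but the count bump under the lock, given the guards above have already filtered out invalid and reserved pages; a sketch:

		return;
	page_map_lock(page);
	page->mapcount++;
	page_map_unlock(page);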
*
* Caller needs to hold the mm->page_table_lock.
*/
-void fastcall page_remove_rmap(struct page *page)
+void page_remove_rmap(struct page *page)
{
BUG_ON(PageReserved(page));
BUG_ON(!page->mapcount);
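A sketch of how the teardown plausibly continues, mirroring the add paths; the bookkeeping when the count reaches zero, and the clear_page_anon helper, are assumptions:

	page_map_lock(page);
	page->mapcount--;
	if (!page->mapcount && PageAnon(page))
		clear_page_anon(page);	/* assumed: drop the anonmm link */
	page_map_unlock(page);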
* If it is shared, then caller must take a copy of the page instead:
* not very clever, but too rare a case to merit cleverness.
*/
-int fastcall mremap_move_anon_rmap(struct page *page, unsigned long address)
+int mremap_move_anon_rmap(struct page *page, unsigned long address)
{
int move = 0;
if (page->mapcount == 1) {
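The hunk ends inside the function; a sketch of the natural completion, assuming a sole mapper simply has its recorded address rewritten (any locking against a concurrent mapper is elided here):

		page->index = address & PAGE_MASK;
		move = 1;
	}
	return move;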
* SWAP_AGAIN - we missed a trylock, try again later
* SWAP_FAIL - the page is unswappable
*/
-int fastcall try_to_unmap(struct page *page)
+int try_to_unmap(struct page *page)
{
int ret;
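try_to_unmap's body is also cut off; a sketch of the likely dispatch, assuming anon and file pages are walked by separate helpers (try_to_unmap_anon and try_to_unmap_file are assumed names) and that success is judged by the map count reaching zero:

	if (PageAnon(page))
		ret = try_to_unmap_anon(page);
	else
		ret = try_to_unmap_file(page);
	if (!page->mapcount)
		ret = SWAP_SUCCESS;
	return ret;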