tlb_flush(tlb);
nr = tlb->nr;
if (!tlb_fast_mode(tlb)) {
- unsigned long i;
+ free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0;
- for (i=0; i < nr; i++)
- free_page_and_swap_cache(tlb->pages[i]);
}
}
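For context, the caller side of this hunk is where the batching happens: pages unmapped under page_table_lock are stashed in tlb->pages[] and only freed once the gather is flushed. Below is a minimal sketch of that path; the names tlb_remove_page(), tlb_flush_mmu(), FREE_PTE_NR and mmu_gather_t are assumptions about the surrounding asm-generic/tlb.h of this era, not part of the hunk above.

/*
 * Sketch only (names assumed, see above): queue one page for deferred
 * freeing; a full array forces a flush, which lands in the hunk above.
 */
static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		/* No batching in fast mode: drop swap cache and free now. */
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;		/* defer until the flush */
	if (tlb->nr >= FREE_PTE_NR)		/* batch full: flush it */
		tlb_flush_mmu(tlb, 0, 0);
}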
#define page_cache_get(page) get_page(page)
#define page_cache_release(page) put_page(page)
+void release_pages(struct page **pages, int nr);
static inline struct page *page_cache_alloc(struct address_space *x)
{
extern int move_from_swap_cache(struct page *page, unsigned long index,
struct address_space *mapping);
extern void free_page_and_swap_cache(struct page *page);
+extern void free_pages_and_swap_cache(struct page **pages, int nr);
extern struct page * lookup_swap_cache(swp_entry_t);
extern struct page * read_swap_cache_async(swp_entry_t);
/*
* Batched page_cache_release(). Decrement the reference count on all the
- * pagevec's pages. If it fell to zero then remove the page from the LRU and
+ * passed pages. If it fell to zero then remove the page from the LRU and
* free it.
*
* Avoid taking zone->lru_lock if possible, but if it is taken, retain it
* for the remainder of the operation.
*
* The locking in this function is against shrink_cache(): we recheck the
* page count inside the lock to see whether shrink_cache grabbed the page
* via the LRU. If it did, give up: shrink_cache will free it.
- *
- * This function reinitialises the caller's pagevec.
*/
-void __pagevec_release(struct pagevec *pvec)
+void release_pages(struct page **pages, int nr)
{
int i;
struct pagevec pages_to_free;
struct zone *zone = NULL;
pagevec_init(&pages_to_free);
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
+ for (i = 0; i < nr; i++) {
+ struct page *page = pages[i];
struct zone *pagezone;
if (PageReserved(page) || !put_page_testzero(page))
continue;
pagezone = page_zone(page);
if (pagezone != zone) {
if (zone)
spin_unlock_irq(&zone->lru_lock);
zone = pagezone;
spin_lock_irq(&zone->lru_lock);
}
if (TestClearPageLRU(page))
del_page_from_lru(zone, page);
- if (page_count(page) == 0)
- pagevec_add(&pages_to_free, page);
+ if (page_count(page) == 0) {
+ if (!pagevec_add(&pages_to_free, page)) {
+ spin_unlock_irq(&zone->lru_lock);
+ pagevec_free(&pages_to_free);
+ pagevec_init(&pages_to_free);
+ spin_lock_irq(&zone->lru_lock);
+ }
+ }
}
if (zone)
spin_unlock_irq(&zone->lru_lock);
pagevec_free(&pages_to_free);
+}
+
+void __pagevec_release(struct pagevec *pvec)
+{
+ release_pages(pvec->pages, pagevec_count(pvec));
pagevec_init(pvec);
}
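A hedged usage sketch of the pagevec side: put_page_batch_example() is a made-up caller, not part of the patch, and it only uses helpers that already appear above (pagevec_init(), pagevec_add(), pagevec_count(), __pagevec_release()).

/* Illustration only: drop many page references via the batched path. */
static void put_page_batch_example(struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec);
	for (i = 0; i < nr; i++) {
		/* pagevec_add() returns 0 once the pagevec is full */
		if (!pagevec_add(&pvec, pages[i]))
			__pagevec_release(&pvec);	/* flush a full batch */
	}
	if (pagevec_count(&pvec))
		__pagevec_release(&pvec);		/* flush the remainder */
}

Because __pagevec_release() reinitialises the caller's pagevec (see above), the loop can keep reusing it after each flush.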
return err;
}
+
/*
- * Perform a free_page(), also freeing any swap cache associated with
- * this page if it is the last user of the page. Can not do a lock_page,
- * as we are holding the page_table_lock spinlock.
+ * If we are the only user, then try to free up the swap cache.
+ *
+ * It's ok to check for PageSwapCache without the page lock
+ * here because we are going to recheck again inside
+ * exclusive_swap_page() _with_ the lock.
+ * - Marcelo
*/
-void free_page_and_swap_cache(struct page *page)
+static inline void free_swap_cache(struct page *page)
{
- /*
- * If we are the only user, then try to free up the swap cache.
- *
- * Its ok to check for PageSwapCache without the page lock
- * here because we are going to recheck again inside
- * exclusive_swap_page() _with_ the lock.
- * - Marcelo
- */
if (PageSwapCache(page) && !TestSetPageLocked(page)) {
remove_exclusive_swap_page(page);
unlock_page(page);
}
+}
+
+/*
+ * Perform a free_page(), also freeing any swap cache associated with
+ * this page if it is the last user of the page. Can not do a lock_page,
+ * as we are holding the page_table_lock spinlock.
+ */
+void free_page_and_swap_cache(struct page *page)
+{
+ free_swap_cache(page);
page_cache_release(page);
}
+/*
+ * Passed an array of pages, drop them all from swapcache and then release
+ * them. They are removed from the LRU and freed if this is their last use.
+ */
+void free_pages_and_swap_cache(struct page **pages, int nr)
+{
+ const int chunk = 16;
+ struct page **pagep = pages;
+
+ while (nr) {
+ int todo = min(chunk, nr);
+ int i;
+
+ for (i = 0; i < todo; i++)
+ free_swap_cache(pagep[i]);
+ release_pages(pagep, todo);
+ pagep += todo;
+ nr -= todo;
+ }
+}
+
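A note on the chunking: each group of at most 16 pages has its swap cache stripped and is then released before the next group is touched, which presumably keeps both passes over a cache-warm set of pages and bounds how much work release_pages() does under any one zone->lru_lock. A batch of 37 pages, for example, is processed as chunks of 16, 16 and 5. The bookkeeping itself is just a fixed-size array walk; a standalone sketch with made-up names:

/* Illustration only: the same chunked walk, names made up. */
static void chunked_walk_example(struct page **pages, int nr)
{
	struct page **p = pages;

	while (nr) {
		int todo = min(16, nr);	/* last chunk may be shorter */

		/* ... operate on p[0], ..., p[todo - 1] ... */
		p += todo;
		nr -= todo;
	}
}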
/*
* Lookup a swap entry in the swap cache. A found page will be returned
* unlocked and with its refcount incremented - we rely on the kernel