[PATCH] node-local allocation for hugetlbpages
author    Andrew Morton <akpm@digeo.com>
          Fri, 20 Jun 2003 15:15:38 +0000 (08:15 -0700)
committer Linus Torvalds <torvalds@home.transmeta.com>
          Fri, 20 Jun 2003 15:15:38 +0000 (08:15 -0700)
From: William Lee Irwin III <wli@holomorphy.com>

The following patch implements node-local memory allocation support for
hugetlb. Successfully tested on NUMA-Q.
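The core of the change is a free list per node plus round-robin allocation
of fresh huge pages: a fault is satisfied from the local node's list when
possible, falling back to the first node that has free pages. The following
userspace sketch mirrors that logic; NODES, the numa_node_id() stub, and
struct hugepage are illustrative stand-ins, not the kernel's types or APIs.

/*
 * Minimal userspace sketch of per-node huge page free lists.
 * All names here are stand-ins for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

#define NODES 4

struct hugepage {
	int node;		/* node the page's memory lives on */
	struct hugepage *next;	/* free list link */
};

static struct hugepage *freelists[NODES];	/* one free list per node */

/* Stub: pretend the caller always runs on node 0. */
static int numa_node_id(void) { return 0; }

static void enqueue(struct hugepage *p)
{
	p->next = freelists[p->node];
	freelists[p->node] = p;
}

/* Prefer the local node; fall back to the first node with free pages. */
static struct hugepage *dequeue(void)
{
	int nid = numa_node_id();
	struct hugepage *p;

	if (!freelists[nid])
		for (nid = 0; nid < NODES && !freelists[nid]; ++nid)
			;
	if (nid >= NODES)
		return NULL;
	p = freelists[nid];
	freelists[nid] = p->next;
	return p;
}

/* Round-robin "fresh" allocation spreads the pool across nodes. */
static struct hugepage *alloc_fresh(void)
{
	static int nid;
	struct hugepage *p = malloc(sizeof(*p));

	if (p)
		p->node = nid;
	nid = (nid + 1) % NODES;
	return p;
}

int main(void)
{
	int i;
	struct hugepage *p;

	for (i = 0; i < 8; ++i) {
		p = alloc_fresh();
		if (p)
			enqueue(p);
	}
	while ((p = dequeue())) {
		printf("got page from node %d\n", p->node);
		free(p);
	}
	return 0;
}

Run with the stub above, the pool fills round-robin across nodes 0-3, and
dequeue() drains node 0 (the "local" node) before scanning the others,
which is exactly the preference order the patch gives a faulting task.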

diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 78c81583dd133ae24ed8d1908c6c2ca815c29b30..f4e07347195866c673c9405bc8038d28c664246b 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -24,9 +24,41 @@ static long    htlbpagemem;
 int     htlbpage_max;
 static long    htlbzone_pages;
 
-static LIST_HEAD(htlbpage_freelist);
+static struct list_head hugepage_freelists[MAX_NUMNODES];
 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
 
+static void enqueue_huge_page(struct page *page)
+{
+       list_add(&page->list,
+               &hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
+}
+
+static struct page *dequeue_huge_page(void)
+{
+       int nid = numa_node_id();
+       struct page *page = NULL;
+
+       if (list_empty(&hugepage_freelists[nid])) {
+               for (nid = 0; nid < MAX_NUMNODES; ++nid)
+                       if (!list_empty(&hugepage_freelists[nid]))
+                               break;
+       }
+       if (nid >= 0 && nid < MAX_NUMNODES && !list_empty(&hugepage_freelists[nid])) {
+               page = list_entry(hugepage_freelists[nid].next, struct page, list);
+               list_del(&page->list);
+       }
+       return page;
+}
+
+static struct page *alloc_fresh_huge_page(void)
+{
+       static int nid = 0;
+       struct page *page;
+       page = alloc_pages_node(nid, GFP_HIGHUSER, HUGETLB_PAGE_ORDER);
+       nid = (nid + 1) % numnodes;
+       return page;
+}
+
 void free_huge_page(struct page *page);
 
 static struct page *alloc_hugetlb_page(void)
@@ -35,13 +67,11 @@ static struct page *alloc_hugetlb_page(void)
        struct page *page;
 
        spin_lock(&htlbpage_lock);
-       if (list_empty(&htlbpage_freelist)) {
+       page = dequeue_huge_page();
+       if (!page) {
                spin_unlock(&htlbpage_lock);
                return NULL;
        }
-
-       page = list_entry(htlbpage_freelist.next, struct page, list);
-       list_del(&page->list);
        htlbpagemem--;
        spin_unlock(&htlbpage_lock);
        set_page_count(page, 1);
@@ -253,7 +283,7 @@ void free_huge_page(struct page *page)
        INIT_LIST_HEAD(&page->list);
 
        spin_lock(&htlbpage_lock);
-       list_add(&page->list, &htlbpage_freelist);
+       enqueue_huge_page(page);
        htlbpagemem++;
        spin_unlock(&htlbpage_lock);
 }
@@ -369,7 +399,8 @@ int try_to_free_low(int count)
 
        map = NULL;
        spin_lock(&htlbpage_lock);
-       list_for_each(p, &htlbpage_freelist) {
+       /* all lowmem is on node 0 */
+       list_for_each(p, &hugepage_freelists[0]) {
                if (map) {
                        list_del(&map->list);
                        update_and_free_page(map);
@@ -406,11 +437,11 @@ int set_hugetlb_mem_size(int count)
                return (int)htlbzone_pages;
        if (lcount > 0) {       /* Increase the mem size. */
                while (lcount--) {
-                       page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
+                       page = alloc_fresh_huge_page();
                        if (page == NULL)
                                break;
                        spin_lock(&htlbpage_lock);
-                       list_add(&page->list, &htlbpage_freelist);
+                       enqueue_huge_page(page);
                        htlbpagemem++;
                        htlbzone_pages++;
                        spin_unlock(&htlbpage_lock);
@@ -451,12 +482,15 @@ static int __init hugetlb_init(void)
        int i;
        struct page *page;
 
+       for (i = 0; i < MAX_NUMNODES; ++i)
+               INIT_LIST_HEAD(&hugepage_freelists[i]);
+
        for (i = 0; i < htlbpage_max; ++i) {
-               page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
+               page = alloc_fresh_huge_page();
                if (!page)
                        break;
                spin_lock(&htlbpage_lock);
-               list_add(&page->list, &htlbpage_freelist);
+               enqueue_huge_page(page);
                spin_unlock(&htlbpage_lock);
        }
        htlbpage_max = htlbpagemem = htlbzone_pages = i;