return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpagemem;
}
+/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
+unsigned long hugetlb_total_pages(void)
+{
+ return htlbzone_pages * (HPAGE_SIZE / PAGE_SIZE);
+}
+
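The same helper is repeated below for each huge-page implementation, using that implementation's pool counter. The HPAGE_SIZE / PAGE_SIZE factor converts the pool size into base pages: for example, with 4 MB huge pages over 4 KB base pages (the i386 non-PAE case), each huge page counts as 1024 base pages, so a pool of 16 huge pages reports 16384 PAGE_SIZE units.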
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
return 1;
}
+/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
+unsigned long hugetlb_total_pages(void)
+{
+ return htlbzone_pages * (HPAGE_SIZE / PAGE_SIZE);
+}
+
static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int *unused)
{
BUG();
return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpage_free;
}
+/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
+unsigned long hugetlb_total_pages(void)
+{
+ return htlbpage_total * (HPAGE_SIZE / PAGE_SIZE);
+}
+
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpagemem;
}
+/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
+unsigned long hugetlb_total_pages(void)
+{
+ return htlbzone_pages * (HPAGE_SIZE / PAGE_SIZE);
+}
+
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpagemem;
}
+/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
+unsigned long hugetlb_total_pages(void)
+{
+ return htlbzone_pages * (HPAGE_SIZE / PAGE_SIZE);
+}
+
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
void huge_page_release(struct page *);
int hugetlb_report_meminfo(char *);
int is_hugepage_mem_enough(size_t);
+unsigned long hugetlb_total_pages(void);
struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write);
struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
{
return 0;
}
+static inline unsigned long hugetlb_total_pages(void)
+{
+ return 0;
+}
#define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; })
#define follow_huge_addr(mm, vma, addr, write) 0
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
+/* It makes sense to apply VM_ACCOUNT to this vma. */
+#define VM_MAYACCT(vma) (!((vma)->vm_flags & VM_HUGETLB))
+
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
int correct_wcount = 0;
int error;
struct rb_node ** rb_link, * rb_parent;
+ int accountable = 1;
unsigned long charged = 0;
if (file) {
+ if (is_file_hugepages(file))
+ accountable = 0;
+
if (!file->f_op || !file->f_op->mmap)
return -ENODEV;
> current->rlim[RLIMIT_AS].rlim_cur)
return -ENOMEM;
- if (!(flags & MAP_NORESERVE) || sysctl_overcommit_memory > 1) {
+ if (accountable && (!(flags & MAP_NORESERVE) ||
+ sysctl_overcommit_memory > 1)) {
if (vm_flags & VM_SHARED) {
/* Check memory availability in shmem_file_setup? */
vm_flags |= VM_ACCOUNT;
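For clarity, the gate added above can be read as a single predicate; the following is only an illustrative condensation (should_account() is a hypothetical name, not part of the patch):

	/* Sketch: a new mapping is charged against overcommit only when it is
	 * not backed by hugetlbfs and either MAP_NORESERVE was not requested
	 * or overcommit is in strict mode (sysctl_overcommit_memory > 1). */
	static int should_account(struct file *file, unsigned long flags)
	{
		if (file && is_file_hugepages(file))
			return 0;	/* huge pages are managed in their own pool */
		return !(flags & MAP_NORESERVE) || sysctl_overcommit_memory > 1;
	}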
* a MAP_NORESERVE private mapping to writable will now reserve.
*/
if (newflags & VM_WRITE) {
- if (!(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
+ if (!(vma->vm_flags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))
+ && VM_MAYACCT(vma)) {
charged = (end - start) >> PAGE_SHIFT;
if (security_vm_enough_memory(charged))
return -ENOMEM;
#include <linux/netlink.h>
#include <linux/ptrace.h>
#include <linux/xattr.h>
+#include <linux/hugetlb.h>
int cap_capable (struct task_struct *tsk, int cap)
{
return -ENOMEM;
}
- allowed = totalram_pages * sysctl_overcommit_ratio / 100;
+ allowed = (totalram_pages - hugetlb_total_pages())
+ * sysctl_overcommit_ratio / 100;
allowed += total_swap_pages;
if (atomic_read(&vm_committed_space) < allowed)
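A worked example of the adjustment (illustrative numbers; the identical change is repeated in the other security modules below): with 1 GiB of RAM (totalram_pages = 262144 four-kilobyte pages), half of it set aside as huge pages (hugetlb_total_pages() = 131072), and sysctl_overcommit_ratio = 50, the old formula allowed 262144 * 50 / 100 = 131072 pages plus swap, while the new one allows (262144 - 131072) * 50 / 100 = 65536 pages plus swap, so memory reserved for the huge-page pool no longer inflates the strict-overcommit limit.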
#include <linux/netlink.h>
#include <net/sock.h>
#include <linux/xattr.h>
+#include <linux/hugetlb.h>
static int dummy_ptrace (struct task_struct *parent, struct task_struct *child)
{
return -ENOMEM;
}
- allowed = totalram_pages * sysctl_overcommit_ratio / 100;
+ allowed = (totalram_pages - hugetlb_total_pages())
+ * sysctl_overcommit_ratio / 100;
allowed += total_swap_pages;
if (atomic_read(&vm_committed_space) < allowed)
#include <net/af_unix.h> /* for Unix socket types */
#include <linux/parser.h>
#include <linux/nfs_mount.h>
+#include <linux/hugetlb.h>
#include "avc.h"
#include "objsec.h"
return -ENOMEM;
}
- allowed = totalram_pages * sysctl_overcommit_ratio / 100;
+ allowed = (totalram_pages - hugetlb_total_pages())
+ * sysctl_overcommit_ratio / 100;
allowed += total_swap_pages;
if (atomic_read(&vm_committed_space) < allowed)