return NULL;
 }
 
-static ssize_t nr_hugepages_show(struct kobject *kobj,
+static ssize_t nr_hugepages_show_common(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
 {
        struct hstate *h = kobj_to_hstate(kobj);
        return sprintf(buf, "%lu\n", h->nr_huge_pages);
 }
-static ssize_t nr_hugepages_store(struct kobject *kobj,
-               struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
+                       struct kobject *kobj, struct kobj_attribute *attr,
+                       const char *buf, size_t len)
 {
        int err;
-       unsigned long input;
+       unsigned long count;
        struct hstate *h = kobj_to_hstate(kobj);
+       NODEMASK_ALLOC(nodemask_t, nodes_allowed);
 
-       err = strict_strtoul(buf, 10, &input);
+       err = strict_strtoul(buf, 10, &count);
        if (err)
                return 0;
 
-       h->max_huge_pages = set_max_huge_pages(h, input, &node_online_map);
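+       /*
+        * If there is no mempolicy constraint to obey, or if nodes_allowed
+        * could not be allocated (init_nodemask_of_mempolicy() then returns
+        * false), fall back to adjusting the pool across all online nodes.
+        */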
+       if (!(obey_mempolicy && init_nodemask_of_mempolicy(nodes_allowed))) {
+               NODEMASK_FREE(nodes_allowed);
+               nodes_allowed = &node_online_map;
+       }
+       h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
 
-       return count;
+       if (nodes_allowed != &node_online_map)
+               NODEMASK_FREE(nodes_allowed);
+
+       return len;
+}
+
+static ssize_t nr_hugepages_show(struct kobject *kobj,
+                                      struct kobj_attribute *attr, char *buf)
+{
+       return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_store(struct kobject *kobj,
+              struct kobj_attribute *attr, const char *buf, size_t len)
+{
+       return nr_hugepages_store_common(false, kobj, attr, buf, len);
 }
 HSTATE_ATTR(nr_hugepages);
 
+#ifdef CONFIG_NUMA
+
+/*
+ * hstate attribute for an optional mempolicy-based constraint on persistent
+ * huge page alloc/free.
+ */
+static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
+                                      struct kobj_attribute *attr, char *buf)
+{
+       return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
+              struct kobj_attribute *attr, const char *buf, size_t len)
+{
+       return nr_hugepages_store_common(true, kobj, attr, buf, len);
+}
+HSTATE_ATTR(nr_hugepages_mempolicy);
+#endif
+
 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
 {
        &free_hugepages_attr.attr,
        &resv_hugepages_attr.attr,
        &surplus_hugepages_attr.attr,
+#ifdef CONFIG_NUMA
+       &nr_hugepages_mempolicy_attr.attr,
+#endif
        NULL,
 };
 
 }
 
 #ifdef CONFIG_SYSCTL
-int hugetlb_sysctl_handler(struct ctl_table *table, int write,
-                          void __user *buffer,
-                          size_t *length, loff_t *ppos)
+static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+                        struct ctl_table *table, int write,
+                        void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct hstate *h = &default_hstate;
        unsigned long tmp;
        table->maxlen = sizeof(unsigned long);
        proc_doulongvec_minmax(table, write, buffer, length, ppos);
 
-       if (write)
-               h->max_huge_pages = set_max_huge_pages(h, tmp,
-                                                       &node_online_map);
+       if (write) {
+               NODEMASK_ALLOC(nodemask_t, nodes_allowed);
+               if (!(obey_mempolicy &&
+                              init_nodemask_of_mempolicy(nodes_allowed))) {
+                       NODEMASK_FREE(nodes_allowed);
+                       nodes_allowed = &node_states[N_HIGH_MEMORY];
+               }
+               h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
+
+               if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+                       NODEMASK_FREE(nodes_allowed);
+       }
 
        return 0;
 }
 
+int hugetlb_sysctl_handler(struct ctl_table *table, int write,
+                         void __user *buffer, size_t *length, loff_t *ppos)
+{
+       return hugetlb_sysctl_handler_common(false, table, write,
+                                                       buffer, length, ppos);
+}
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
+                         void __user *buffer, size_t *length, loff_t *ppos)
+{
+       return hugetlb_sysctl_handler_common(true, table, write,
+                                                       buffer, length, ppos);
+}
+#endif /* CONFIG_NUMA */
+
 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
                        void __user *buffer,
                        size_t *length, loff_t *ppos)
 
        }
        return zl;
 }
+
+/*
+ * init_nodemask_of_mempolicy
+ *
+ * If the current task's mempolicy is "default" [NULL], return 'false'
+ * to indicate default policy.  Otherwise, extract the policy nodemask
+ * for 'bind' or 'interleave' policy into the argument nodemask, or
+ * initialize the argument nodemask to contain the single node for
+ * 'preferred' or 'local' policy and return 'true' to indicate presence
+ * of non-default mempolicy.
+ *
+ * We don't bother with reference counting the mempolicy [mpol_get/put]
+ * because the current task is examining its own mempolicy and a task's
+ * mempolicy is only ever changed by the task itself.
+ *
+ * N.B., it is the caller's responsibility to free the nodemask passed in.
+ */
+bool init_nodemask_of_mempolicy(nodemask_t *mask)
+{
+       struct mempolicy *mempolicy;
+       int nid;
+
+       if (!(mask && current->mempolicy))
+               return false;
+
+       mempolicy = current->mempolicy;
+       switch (mempolicy->mode) {
+       case MPOL_PREFERRED:
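+               /*
+                * MPOL_F_LOCAL denotes "local allocation" policy: constrain
+                * to the node the task is currently running on.
+                */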
+               if (mempolicy->flags & MPOL_F_LOCAL)
+                       nid = numa_node_id();
+               else
+                       nid = mempolicy->v.preferred_node;
+               init_nodemask_of_node(mask, nid);
+               break;
+
+       case MPOL_BIND:
+               /* Fall through */
+       case MPOL_INTERLEAVE:
+       *mask = mempolicy->v.nodes;
+               break;
+
+       default:
+               BUG();
+       }
+
+       return true;
+}
 #endif
 
 /* Allocate a page in interleaved policy.