}
static __fastpath_inline
-void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp)
+void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node)
{
	struct slub_percpu_sheaves *pcs;
	void *object;
#ifdef CONFIG_NUMA
-	if (static_branch_unlikely(&strict_numa)) {
-		if (current->mempolicy)
-			return NULL;
+	if (static_branch_unlikely(&strict_numa) &&
+			node == NUMA_NO_NODE) {
+
+		struct mempolicy *mpol = current->mempolicy;
+
+		if (mpol) {
+			/*
+			 * Special BIND rule support. If the local node
+			 * is in the permitted set, do not redirect to a
+			 * particular node. Otherwise, apply the memory
+			 * policy to get the node we need to allocate on.
+			 */
+			if (mpol->mode != MPOL_BIND ||
+			    !node_isset(numa_mem_id(), mpol->nodes))
+
+				node = mempolicy_slab_node();
+		}
	}
#endif
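
For readers less familiar with mempolicies, here is a minimal userspace model of the node-selection rule added above. This is not kernel code: the model_ names, MODEL_NO_NODE and the local_node/policy_node parameters are invented, with policy_node standing in for mempolicy_slab_node(). The rule it sketches: an MPOL_BIND policy that permits the local node keeps the allocation on the sheaf fast path; any other policy, or a bind set excluding the local node, redirects to the policy's chosen node.

enum model_mpol_mode { MODEL_MPOL_DEFAULT, MODEL_MPOL_BIND };

struct model_mempolicy {
	enum model_mpol_mode mode;
	unsigned long nodes;		/* bitmask of permitted nodes */
};

#define MODEL_NO_NODE (-1)

/* returns the node to target, or MODEL_NO_NODE to stay on the fast path */
static int model_pick_node(const struct model_mempolicy *mpol, int local_node,
			   int policy_node)
{
	if (!mpol)
		return MODEL_NO_NODE;

	/* MPOL_BIND permitting the local node: no redirect */
	if (mpol->mode == MODEL_MPOL_BIND && (mpol->nodes & (1UL << local_node)))
		return MODEL_NO_NODE;

	/* otherwise follow the memory policy's node choice */
	return policy_node;
}
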
+	if (unlikely(node != NUMA_NO_NODE)) {
+		/*
+		 * We assume the percpu sheaves contain only local objects,
+		 * although that is not completely guaranteed, so we verify
+		 * it later.
+		 */
+		if (node != numa_mem_id())
+			return NULL;
+	}
+
	if (!local_trylock(&s->cpu_sheaves->lock))
		return NULL;

[...]

		return NULL;
	}
-	object = pcs->main->objects[--pcs->main->size];
+	object = pcs->main->objects[pcs->main->size - 1];
+
+	if (unlikely(node != NUMA_NO_NODE)) {
+		/*
+		 * Verify that the object came from the node we want. It might
+		 * not have, due to cpu migration during an unlocked part of
+		 * the current allocation or of a previous freeing operation.
+		 */
+		if (folio_nid(virt_to_folio(object)) != node) {
+			local_unlock(&s->cpu_sheaves->lock);
+			return NULL;
+		}
+	}
+
+	pcs->main->size--;

	local_unlock(&s->cpu_sheaves->lock);
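
Note how the former --pcs->main->size pop is split into a peek (size - 1) and a later pcs->main->size--: the object is only consumed after it passes the node check, so a mismatch leaves the sheaf contents untouched for the next allocation. A small userspace sketch of that peek/verify/commit pattern (illustrative only; the model_ names are invented, and node_of stands in for folio_nid(virt_to_folio(object))):

#include <stddef.h>

struct model_sheaf {
	size_t size;
	void *objects[32];
};

/* want_node == -1 means "any node", mirroring NUMA_NO_NODE */
static void *model_pop_if_local(struct model_sheaf *sheaf, int want_node,
				int (*node_of)(const void *obj))
{
	void *object;

	if (sheaf->size == 0)
		return NULL;

	object = sheaf->objects[sheaf->size - 1];	/* peek at the top object */

	if (want_node != -1 && node_of(object) != want_node)
		return NULL;				/* mismatch: leave the sheaf untouched */

	sheaf->size--;					/* commit the pop */
	return object;
}
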
[...]

	if (unlikely(object))
		goto out;

-	if (s->cpu_sheaves && node == NUMA_NO_NODE)
-		object = alloc_from_pcs(s, gfpflags);
+	if (s->cpu_sheaves)
+		object = alloc_from_pcs(s, gfpflags, node);

	if (!object)
		object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
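
For context on the caller side (illustrative only; cache and nid are placeholders, not identifiers from the patch), a node-constrained allocation such as

	void *obj = kmem_cache_alloc_node(cache, GFP_KERNEL, nid);

can now be served from the percpu sheaf when nid matches the allocating CPU's node, and otherwise falls back to __slab_alloc_node() as before.
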