squash! mm: Check for SLAB_TYPESAFE_BY_RCU and __GFP_ZERO slab creation

s/creation/allocation/ and adjust.

The creation-time check cannot catch the problematic combination because
__GFP_ZERO is a GFP flag supplied at allocation time, not a slab flag
supplied at cache-creation time, so move the check from
kmem_cache_create_usercopy() into each kmem_cache_alloc() implementation
and demote it from an error return to a WARN_ON_ONCE().

[ paulmck: Apply feedback from kernel test robot. ]

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
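
For illustration, a minimal sketch of the misuse that the relocated
check now flags.  This is not part of the patch: struct foo, foo_cache,
and foo_example() are hypothetical names.  Cache creation succeeds,
because __GFP_ZERO cannot be seen there, and the allocation then fires
the new WARN_ON_ONCE():

	#include <linux/slab.h>

	struct foo {
		int key;
	};

	static struct kmem_cache *foo_cache;

	static int foo_example(void)
	{
		struct foo *p;

		/* No creation-time check is possible: no GFP flags here. */
		foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
					      0, SLAB_TYPESAFE_BY_RCU, NULL);
		if (!foo_cache)
			return -ENOMEM;

		/* Fires WARN_ON_ONCE(): zeroing would clobber an object
		 * that RCU readers may still be referencing. */
		p = kmem_cache_alloc(foo_cache, GFP_KERNEL | __GFP_ZERO);
		if (p)
			kmem_cache_free(foo_cache, p);
		return 0;
	}
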
diff --git a/mm/slab.c b/mm/slab.c
index ddf5737..dfa4893 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3496,8 +3496,11 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
*/
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
+ void *ret;
+	/* References to typesafe memory survive free/alloc. */
+ WARN_ON_ONCE((flags & __GFP_ZERO) && (cachep->flags & SLAB_TYPESAFE_BY_RCU));
+ ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index fe8a3cf..23f2ab07 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -332,12 +332,6 @@ kmem_cache_create_usercopy(const char *name,
goto out_unlock;
}
- /* References to typesafe memory survives free/alloc. */
- if ((flags & (SLAB_TYPESAFE_BY_RCU | __GFP_ZERO)) == (SLAB_TYPESAFE_BY_RCU | __GFP_ZERO)) {
- err = -EINVAL;
- goto out_unlock;
- }
-
/*
* Some allocators will constraint the set of valid flags to a subset
* of all flags. We expect them to define CACHE_CREATE_MASK in this
diff --git a/mm/slob.c b/mm/slob.c
index 60c5842..76a8340 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -631,6 +631,8 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
+	/* References to typesafe memory survive free/alloc. */
+ WARN_ON_ONCE((flags & __GFP_ZERO) && (cachep->flags & SLAB_TYPESAFE_BY_RCU));
return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);
diff --git a/mm/slub.c b/mm/slub.c
index 2614740..8f04f47 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3240,8 +3240,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
- void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
+ void *ret;
+	/* References to typesafe memory survive free/alloc. */
+ WARN_ON_ONCE((gfpflags & __GFP_ZERO) && (s->flags & SLAB_TYPESAFE_BY_RCU));
+ ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
s->size, gfpflags);
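
For background, a minimal reader-side sketch of why zeroing typesafe
memory is destructive.  Again illustrative only, not from the patch:
struct foo, foo_lookup() (an RCU-protected lookup), and foo_put() (a
reference drop) are hypothetical.  The point is that a reader may
legitimately touch an object across a free/realloc of that object,
which a __GFP_ZERO wipe at reallocation would break:

	#include <linux/atomic.h>
	#include <linux/rcupdate.h>

	struct foo {
		atomic_t refcnt;
		int key;
	};

	/* Hypothetical helpers: an RCU-protected lookup and a ref drop. */
	struct foo *foo_lookup(int key);
	void foo_put(struct foo *p);

	static struct foo *foo_get(int key)
	{
		struct foo *p;

		rcu_read_lock();
		p = foo_lookup(key);
		/*
		 * p may have been freed, and even reallocated, since the
		 * lookup.  SLAB_TYPESAFE_BY_RCU guarantees the memory is
		 * still a struct foo, so atomic_inc_not_zero() and the
		 * ->key recheck below remain safe.  A __GFP_ZERO
		 * reallocation would wipe ->refcnt and ->key out from
		 * under this reader, breaking both.
		 */
		if (p && atomic_inc_not_zero(&p->refcnt)) {
			if (p->key == key) {
				rcu_read_unlock();
				return p; /* caller now holds a reference */
			}
			foo_put(p); /* object was recycled for another key */
		}
		rcu_read_unlock();
		return NULL;
	}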