Revision f0a3a24b532d9a7e56a33c5112b2a212ed6ec580 authored by Roman Gushchin on 12 July 2019, 03:56:27 UTC, committed by Linus Torvalds on 12 July 2019, 18:05:44 UTC
Currently each charged slab page holds a reference to the cgroup to which
it's charged.  Kmem_caches are held by the memcg and are released together
with the memory cgroup.  This means that no kmem_cache is released while
at least one reference to the memcg exists, which is far from optimal.

Let's rework it in a way that allows releasing individual kmem_caches as
soon as the cgroup is offline, the kmem_cache is empty, and there are no
pending allocations.

To make this possible, let's introduce a new percpu refcounter for
non-root kmem caches.  The counter is initialized in percpu mode and is
switched to atomic mode during kmem_cache deactivation.  The counter is
bumped for every charged page and for every running allocation, so the
kmem_cache can't be released until all pending allocations complete.
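
This lifecycle maps onto the kernel's generic percpu_ref API.  Here is a
minimal sketch of that usage, not the patch itself: the stub struct and
the kmemcg_* names are illustrative (the real patch embeds the counter in
struct memcg_cache_params):

    #include <linux/percpu-refcount.h>
    #include <linux/gfp.h>

    /* illustrative stand-in for a non-root kmem_cache's memcg state */
    struct kmemcg_cache_stub {
            struct percpu_ref refcnt;
    };

    /* fires once the counter drops to zero (see the sketch below) */
    static void kmemcg_refcnt_release(struct percpu_ref *ref);

    /* cache creation: the counter starts in cheap percpu mode */
    static int kmemcg_stub_init(struct kmemcg_cache_stub *s)
    {
            return percpu_ref_init(&s->refcnt, kmemcg_refcnt_release,
                                   0, GFP_KERNEL);
    }

    /* every charged slab page and every running allocation holds a ref */
    static void kmemcg_stub_hold(struct kmemcg_cache_stub *s)
    {
            percpu_ref_get(&s->refcnt);
    }

    static void kmemcg_stub_drop(struct kmemcg_cache_stub *s)
    {
            percpu_ref_put(&s->refcnt);
    }

    /* deactivation: switch to atomic mode and drop the initial ref */
    static void kmemcg_stub_deactivate(struct kmemcg_cache_stub *s)
    {
            percpu_ref_kill(&s->refcnt);
    }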

To shut down inactive empty kmem_caches, let's reuse the workqueue
previously used for kmem_cache deactivation.  Once the reference counter
reaches 0, an asynchronous kmem_cache release is scheduled.
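
The zero-reference path can then look roughly like the sketch below.  The
real patch reuses the existing deactivation work item; a plain
work_struct and schedule_work() are used here only to illustrate
deferring the release out of the percpu_ref callback, which may run from
any context:

    #include <linux/kernel.h>
    #include <linux/percpu-refcount.h>
    #include <linux/workqueue.h>

    struct kmemcg_cache_stub {
            struct percpu_ref refcnt;
            /* illustrative; set up with INIT_WORK() at creation (not shown) */
            struct work_struct release_work;
    };

    static void kmemcg_release_workfn(struct work_struct *work)
    {
            /* the empty, offline kmem_cache would be shut down here */
    }

    /* percpu_ref release callback: runs when the last reference is put */
    static void kmemcg_refcnt_release(struct percpu_ref *ref)
    {
            struct kmemcg_cache_stub *s =
                    container_of(ref, struct kmemcg_cache_stub, refcnt);

            /* defer the actual destruction to process context */
            schedule_work(&s->release_work);
    }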

* I used the following simple approach to test the performance
(stolen from another patchset by T. Harding):

    time find / -name fname-no-exist
    echo 2 > /proc/sys/vm/drop_caches
    repeat 10 times

Results:

        orig		patched

real	0m1.455s	real	0m1.355s
user	0m0.206s	user	0m0.219s
sys	0m0.855s	sys	0m0.807s

real	0m1.487s	real	0m1.699s
user	0m0.221s	user	0m0.256s
sys	0m0.806s	sys	0m0.948s

real	0m1.515s	real	0m1.505s
user	0m0.183s	user	0m0.215s
sys	0m0.876s	sys	0m0.858s

real	0m1.291s	real	0m1.380s
user	0m0.193s	user	0m0.198s
sys	0m0.843s	sys	0m0.786s

real	0m1.364s	real	0m1.374s
user	0m0.180s	user	0m0.182s
sys	0m0.868s	sys	0m0.806s

real	0m1.352s	real	0m1.312s
user	0m0.201s	user	0m0.212s
sys	0m0.820s	sys	0m0.761s

real	0m1.302s	real	0m1.349s
user	0m0.205s	user	0m0.203s
sys	0m0.803s	sys	0m0.792s

real	0m1.334s	real	0m1.301s
user	0m0.194s	user	0m0.201s
sys	0m0.806s	sys	0m0.779s

real	0m1.426s	real	0m1.434s
user	0m0.216s	user	0m0.181s
sys	0m0.824s	sys	0m0.864s

real	0m1.350s	real	0m1.295s
user	0m0.200s	user	0m0.190s
sys	0m0.842s	sys	0m0.811s

So it looks like the difference is not noticeable in this test.

[cai@lca.pw: fix an use-after-free in kmemcg_workfn()]
  Link: http://lkml.kernel.org/r/1560977573-10715-1-git-send-email-cai@lca.pw
Link: http://lkml.kernel.org/r/20190611231813.3148843-9-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Qian Cai <cai@lca.pw>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Waiman Long <longman@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Andrei Vagin <avagin@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
test_kasan.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#define pr_fmt(fmt) "kasan test: %s " fmt, __func__

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Note: test functions are marked noinline so that their names appear in
 * reports.
 */

static noinline void __init kmalloc_oob_right(void)
{
	char *ptr;
	size_t size = 123;

	pr_info("out-of-bounds to right\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr[size] = 'x';
	kfree(ptr);
}

static noinline void __init kmalloc_oob_left(void)
{
	char *ptr;
	size_t size = 15;

	pr_info("out-of-bounds to left\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	*ptr = *(ptr - 1);
	kfree(ptr);
}

static noinline void __init kmalloc_node_oob_right(void)
{
	char *ptr;
	size_t size = 4096;

	pr_info("kmalloc_node(): out-of-bounds to right\n");
	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr[size] = 0;
	kfree(ptr);
}

#ifdef CONFIG_SLUB
static noinline void __init kmalloc_pagealloc_oob_right(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	/* Allocate a chunk that does not fit into a SLUB cache to trigger
	 * the page allocator fallback.
	 */
	pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr[size] = 0;
	kfree(ptr);
}

static noinline void __init kmalloc_pagealloc_uaf(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	pr_info("kmalloc pagealloc allocation: use-after-free\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr);
	ptr[0] = 0;
}

static noinline void __init kmalloc_pagealloc_invalid_free(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	pr_info("kmalloc pagealloc allocation: invalid-free\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr + 1);
}
#endif

static noinline void __init kmalloc_large_oob_right(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
	/* Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	pr_info("kmalloc large allocation: out-of-bounds to right\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr[size] = 0;
	kfree(ptr);
}

static noinline void __init kmalloc_oob_krealloc_more(void)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 19;

	pr_info("out-of-bounds after krealloc more\n");
	ptr1 = kmalloc(size1, GFP_KERNEL);
	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	if (!ptr1 || !ptr2) {
		pr_err("Allocation failed\n");
		kfree(ptr1);
		return;
	}

	ptr2[size2] = 'x';
	kfree(ptr2);
}

static noinline void __init kmalloc_oob_krealloc_less(void)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 15;

	pr_info("out-of-bounds after krealloc less\n");
	ptr1 = kmalloc(size1, GFP_KERNEL);
	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	if (!ptr1 || !ptr2) {
		pr_err("Allocation failed\n");
		kfree(ptr1);
		return;
	}
	ptr2[size2] = 'x';
	kfree(ptr2);
}

static noinline void __init kmalloc_oob_16(void)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	pr_info("kmalloc out-of-bounds for 16-bytes access\n");
	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	if (!ptr1 || !ptr2) {
		pr_err("Allocation failed\n");
		kfree(ptr1);
		kfree(ptr2);
		return;
	}
	*ptr1 = *ptr2;
	kfree(ptr1);
	kfree(ptr2);
}

static noinline void __init kmalloc_oob_memset_2(void)
{
	char *ptr;
	size_t size = 8;

	pr_info("out-of-bounds in memset2\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr+7, 0, 2);
	kfree(ptr);
}

static noinline void __init kmalloc_oob_memset_4(void)
{
	char *ptr;
	size_t size = 8;

	pr_info("out-of-bounds in memset4\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr+5, 0, 4);
	kfree(ptr);
}

static noinline void __init kmalloc_oob_memset_8(void)
{
	char *ptr;
	size_t size = 8;

	pr_info("out-of-bounds in memset8\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr+1, 0, 8);
	kfree(ptr);
}

static noinline void __init kmalloc_oob_memset_16(void)
{
	char *ptr;
	size_t size = 16;

	pr_info("out-of-bounds in memset16\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr+1, 0, 16);
	kfree(ptr);
}

static noinline void __init kmalloc_oob_in_memset(void)
{
	char *ptr;
	size_t size = 666;

	pr_info("out-of-bounds in memset\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	memset(ptr, 0, size+5);
	kfree(ptr);
}

static noinline void __init kmalloc_uaf(void)
{
	char *ptr;
	size_t size = 10;

	pr_info("use-after-free\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr);
	*(ptr + 8) = 'x';
}

static noinline void __init kmalloc_uaf_memset(void)
{
	char *ptr;
	size_t size = 33;

	pr_info("use-after-free in memset\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr);
	memset(ptr, 0, size);
}

static noinline void __init kmalloc_uaf2(void)
{
	char *ptr1, *ptr2;
	size_t size = 43;

	pr_info("use-after-free after another kmalloc\n");
	ptr1 = kmalloc(size, GFP_KERNEL);
	if (!ptr1) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr1);
	ptr2 = kmalloc(size, GFP_KERNEL);
	if (!ptr2) {
		pr_err("Allocation failed\n");
		return;
	}

	ptr1[40] = 'x';
	if (ptr1 == ptr2)
		pr_err("Could not detect use-after-free: ptr1 == ptr2\n");
	kfree(ptr2);
}

static noinline void __init kmem_cache_oob(void)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache = kmem_cache_create("test_cache",
						size, 0,
						0, NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}
	pr_info("out-of-bounds in kmem_cache_alloc\n");
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		pr_err("Allocation failed\n");
		kmem_cache_destroy(cache);
		return;
	}

	*p = p[size];
	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static noinline void __init memcg_accounted_kmem_cache(void)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}

	pr_info("allocate memcg accounted object\n");
	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static char global_array[10];

static noinline void __init kasan_global_oob(void)
{
	volatile int i = 3;
	char *p = &global_array[ARRAY_SIZE(global_array) + i];

	pr_info("out-of-bounds global variable\n");
	*(volatile char *)p;
}

static noinline void __init kasan_stack_oob(void)
{
	char stack_array[10];
	volatile int i = 0;
	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];

	pr_info("out-of-bounds on stack\n");
	*(volatile char *)p;
}

static noinline void __init ksize_unpoisons_memory(void)
{
	char *ptr;
	size_t size = 123, real_size;

	pr_info("ksize() unpoisons the whole allocated chunk\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}
	real_size = ksize(ptr);
	/* This access doesn't trigger an error. */
	ptr[size] = 'x';
	/* This one does. */
	ptr[real_size] = 'y';
	kfree(ptr);
}

static noinline void __init copy_user_test(void)
{
	char *kmem;
	char __user *usermem;
	size_t size = 10;
	int unused;

	kmem = kmalloc(size, GFP_KERNEL);
	if (!kmem)
		return;

	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (IS_ERR(usermem)) {
		pr_err("Failed to allocate user memory\n");
		kfree(kmem);
		return;
	}

	pr_info("out-of-bounds in copy_from_user()\n");
	unused = copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in copy_to_user()\n");
	unused = copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user()\n");
	unused = __copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user()\n");
	unused = __copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
	unused = __copy_from_user_inatomic(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
	unused = __copy_to_user_inatomic(usermem, kmem, size + 1);

	pr_info("out-of-bounds in strncpy_from_user()\n");
	unused = strncpy_from_user(kmem, usermem, size + 1);

	vm_munmap((unsigned long)usermem, PAGE_SIZE);
	kfree(kmem);
}

static noinline void __init kasan_alloca_oob_left(void)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array - 1;

	pr_info("out-of-bounds to left on alloca\n");
	*(volatile char *)p;
}

static noinline void __init kasan_alloca_oob_right(void)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array + i;

	pr_info("out-of-bounds to right on alloca\n");
	*(volatile char *)p;
}

static noinline void __init kmem_cache_double_free(void)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}
	pr_info("double-free on heap object\n");
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		pr_err("Allocation failed\n");
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static noinline void __init kmem_cache_invalid_free(void)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}
	pr_info("invalid-free of heap object\n");
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		pr_err("Allocation failed\n");
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed */
	kmem_cache_free(cache, p + 1);

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static noinline void __init kasan_memchr(void)
{
	char *ptr;
	size_t size = 24;

	pr_info("out-of-bounds in memchr\n");
	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!ptr)
		return;

	memchr(ptr, '1', size + 1);
	kfree(ptr);
}

static noinline void __init kasan_memcmp(void)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	pr_info("out-of-bounds in memcmp\n");
	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!ptr)
		return;

	memset(arr, 0, sizeof(arr));
	memcmp(ptr, arr, size+1);
	kfree(ptr);
}

static noinline void __init kasan_strings(void)
{
	char *ptr;
	size_t size = 24;

	pr_info("use-after-free in strchr\n");
	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!ptr)
		return;

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	strchr(ptr, '1');

	pr_info("use-after-free in strrchr\n");
	strrchr(ptr, '1');

	pr_info("use-after-free in strcmp\n");
	strcmp(ptr, "2");

	pr_info("use-after-free in strncmp\n");
	strncmp(ptr, "2", 1);

	pr_info("use-after-free in strlen\n");
	strlen(ptr);

	pr_info("use-after-free in strnlen\n");
	strnlen(ptr, 1);
}

static noinline void __init kasan_bitops(void)
{
	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
	 * this way we do not actually corrupt other memory.
	 */
	long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	if (!bits)
		return;

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	pr_info("out-of-bounds in set_bit\n");
	set_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in __set_bit\n");
	__set_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in clear_bit\n");
	clear_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in __clear_bit\n");
	__clear_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in clear_bit_unlock\n");
	clear_bit_unlock(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in __clear_bit_unlock\n");
	__clear_bit_unlock(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in change_bit\n");
	change_bit(BITS_PER_LONG, bits);

	pr_info("out-of-bounds in __change_bit\n");
	__change_bit(BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	pr_info("out-of-bounds in test_and_set_bit\n");
	test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in __test_and_set_bit\n");
	__test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in test_and_set_bit_lock\n");
	test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in test_and_clear_bit\n");
	test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in __test_and_clear_bit\n");
	__test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in test_and_change_bit\n");
	test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in __test_and_change_bit\n");
	__test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

	pr_info("out-of-bounds in test_bit\n");
	(void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);

#if defined(clear_bit_unlock_is_negative_byte)
	pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
	clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
#endif
	kfree(bits);
}

static noinline void __init kmalloc_double_kzfree(void)
{
	char *ptr;
	size_t size = 16;

	pr_info("double-free (kzfree)\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kzfree(ptr);
	kzfree(ptr);
}

static int __init kmalloc_tests_init(void)
{
	/*
	 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
	 * report for the first case.
	 */
	bool multishot = kasan_save_enable_multi_shot();

	kmalloc_oob_right();
	kmalloc_oob_left();
	kmalloc_node_oob_right();
#ifdef CONFIG_SLUB
	kmalloc_pagealloc_oob_right();
	kmalloc_pagealloc_uaf();
	kmalloc_pagealloc_invalid_free();
#endif
	kmalloc_large_oob_right();
	kmalloc_oob_krealloc_more();
	kmalloc_oob_krealloc_less();
	kmalloc_oob_16();
	kmalloc_oob_in_memset();
	kmalloc_oob_memset_2();
	kmalloc_oob_memset_4();
	kmalloc_oob_memset_8();
	kmalloc_oob_memset_16();
	kmalloc_uaf();
	kmalloc_uaf_memset();
	kmalloc_uaf2();
	kmem_cache_oob();
	memcg_accounted_kmem_cache();
	kasan_stack_oob();
	kasan_global_oob();
	kasan_alloca_oob_left();
	kasan_alloca_oob_right();
	ksize_unpoisons_memory();
	copy_user_test();
	kmem_cache_double_free();
	kmem_cache_invalid_free();
	kasan_memchr();
	kasan_memcmp();
	kasan_strings();
	kasan_bitops();
	kmalloc_double_kzfree();

	kasan_restore_multi_shot(multishot);

	return -EAGAIN;
}

module_init(kmalloc_tests_init);
MODULE_LICENSE("GPL");