Revision 49054556289e8787501630b7c7a9d407da02e296 authored by Paolo Abeni on 29 September 2021, 09:59:17 UTC, committed by David S. Miller on 30 September 2021, 12:06:47 UTC
Syzkaller reported a false positive deadlock involving
the nl socket lock and the subflow socket lock:

MPTCP: kernel_bind error, err=-98
============================================
WARNING: possible recursive locking detected
5.15.0-rc1-syzkaller #0 Not tainted
--------------------------------------------
syz-executor998/6520 is trying to acquire lock:
ffff8880795718a0 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: mptcp_close+0x267/0x7b0 net/mptcp/protocol.c:2738

but task is already holding lock:
ffff8880787c8c60 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1612 [inline]
ffff8880787c8c60 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: mptcp_close+0x23/0x7b0 net/mptcp/protocol.c:2720

other info that might help us debug this:
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(k-sk_lock-AF_INET);
  lock(k-sk_lock-AF_INET);

 *** DEADLOCK ***

 May be due to missing lock nesting notation

3 locks held by syz-executor998/6520:
 #0: ffffffff8d176c50 (cb_lock){++++}-{3:3}, at: genl_rcv+0x15/0x40 net/netlink/genetlink.c:802
 #1: ffffffff8d176d08 (genl_mutex){+.+.}-{3:3}, at: genl_lock net/netlink/genetlink.c:33 [inline]
 #1: ffffffff8d176d08 (genl_mutex){+.+.}-{3:3}, at: genl_rcv_msg+0x3e0/0x580 net/netlink/genetlink.c:790
 #2: ffff8880787c8c60 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1612 [inline]
 #2: ffff8880787c8c60 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: mptcp_close+0x23/0x7b0 net/mptcp/protocol.c:2720

stack backtrace:
CPU: 1 PID: 6520 Comm: syz-executor998 Not tainted 5.15.0-rc1-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
 print_deadlock_bug kernel/locking/lockdep.c:2944 [inline]
 check_deadlock kernel/locking/lockdep.c:2987 [inline]
 validate_chain kernel/locking/lockdep.c:3776 [inline]
 __lock_acquire.cold+0x149/0x3ab kernel/locking/lockdep.c:5015
 lock_acquire kernel/locking/lockdep.c:5625 [inline]
 lock_acquire+0x1ab/0x510 kernel/locking/lockdep.c:5590
 lock_sock_fast+0x36/0x100 net/core/sock.c:3229
 mptcp_close+0x267/0x7b0 net/mptcp/protocol.c:2738
 inet_release+0x12e/0x280 net/ipv4/af_inet.c:431
 __sock_release net/socket.c:649 [inline]
 sock_release+0x87/0x1b0 net/socket.c:677
 mptcp_pm_nl_create_listen_socket+0x238/0x2c0 net/mptcp/pm_netlink.c:900
 mptcp_nl_cmd_add_addr+0x359/0x930 net/mptcp/pm_netlink.c:1170
 genl_family_rcv_msg_doit+0x228/0x320 net/netlink/genetlink.c:731
 genl_family_rcv_msg net/netlink/genetlink.c:775 [inline]
 genl_rcv_msg+0x328/0x580 net/netlink/genetlink.c:792
 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2504
 genl_rcv+0x24/0x40 net/netlink/genetlink.c:803
 netlink_unicast_kernel net/netlink/af_netlink.c:1314 [inline]
 netlink_unicast+0x533/0x7d0 net/netlink/af_netlink.c:1340
 netlink_sendmsg+0x86d/0xdb0 net/netlink/af_netlink.c:1929
 sock_sendmsg_nosec net/socket.c:704 [inline]
 sock_sendmsg+0xcf/0x120 net/socket.c:724
 sock_no_sendpage+0x101/0x150 net/core/sock.c:2980
 kernel_sendpage.part.0+0x1a0/0x340 net/socket.c:3504
 kernel_sendpage net/socket.c:3501 [inline]
 sock_sendpage+0xe5/0x140 net/socket.c:1003
 pipe_to_sendpage+0x2ad/0x380 fs/splice.c:364
 splice_from_pipe_feed fs/splice.c:418 [inline]
 __splice_from_pipe+0x43e/0x8a0 fs/splice.c:562
 splice_from_pipe fs/splice.c:597 [inline]
 generic_splice_sendpage+0xd4/0x140 fs/splice.c:746
 do_splice_from fs/splice.c:767 [inline]
 direct_splice_actor+0x110/0x180 fs/splice.c:936
 splice_direct_to_actor+0x34b/0x8c0 fs/splice.c:891
 do_splice_direct+0x1b3/0x280 fs/splice.c:979
 do_sendfile+0xae9/0x1240 fs/read_write.c:1249
 __do_sys_sendfile64 fs/read_write.c:1314 [inline]
 __se_sys_sendfile64 fs/read_write.c:1300 [inline]
 __x64_sys_sendfile64+0x1cc/0x210 fs/read_write.c:1300
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f215cb69969
Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 14 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 c0 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007ffc96bb3868 EFLAGS: 00000246 ORIG_RAX: 0000000000000028
RAX: ffffffffffffffda RBX: 00007f215cbad072 RCX: 00007f215cb69969
RDX: 0000000000000000 RSI: 0000000000000004 RDI: 0000000000000005
RBP: 0000000000000000 R08: 00007ffc96bb3a08 R09: 00007ffc96bb3a08
R10: 0000000100000002 R11: 0000000000000246 R12: 00007ffc96bb387c
R13: 431bde82d7b634db R14: 0000000000000000 R15: 0000000000000000

The problem originates from an incorrect lock annotation in the MPTCP
code and is only visible since commit 2dcb96bacce3 ("net: core: Correct
the sock::sk_lock.owned lockdep annotations"), but it has been present
since the initial implementation of port-based endpoint support.

This patch addresses the issue by introducing a nested variant of
lock_sock_fast() and using it in the relevant code path.
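
The idea, in a minimal sketch (the helper names lock_sock_fast_nested()
and __lock_sock_fast() below are illustrative assumptions, not
necessarily the exact symbols added by the patch), is to annotate the
inner acquisition of the subflow's k-sk_lock-AF_INET with a lockdep
nesting subclass, so that taking it while the MPTCP-level socket lock
is already held is no longer flagged as recursive locking:

	/* Sketch only: assumed names, the actual patch may differ in detail. */
	static inline bool lock_sock_fast_nested(struct sock *sk)
	{
		/* Record this acquisition one nesting level deep for lockdep. */
		mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);

		return __lock_sock_fast(sk);	/* assumed helper sharing lock_sock_fast()'s body */
	}

mptcp_close() would then take the subflow socket locks through this
variant in the call chain shown in the splat above.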

Fixes: 1729cf186d8a ("mptcp: create the listening socket for new port")
Fixes: 2dcb96bacce3 ("net: core: Correct the sock::sk_lock.owned lockdep annotations")
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Reported-and-tested-by: syzbot+1dd53f7a89b299d59eaf@syzkaller.appspotmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
test_hash.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test cases for <linux/hash.h> and <linux/stringhash.h>
 * This just verifies that various ways of computing a hash
 * produce the same thing and, for cases where a k-bit hash
 * value is requested, is of the requested size.
 *
 * We fill a buffer with a 255-byte null-terminated string,
 * and use both full_name_hash() and hashlen_string() to hash the
 * substrings from i to j, where 0 <= i < j < 256.
 *
 * The returned values are used to check that __hash_32() and
 * __hash_32_generic() compute the same thing.  Likewise hash_32()
 * and hash_64().
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <linux/printk.h>

/* 32-bit XORSHIFT generator.  Seed must not be zero. */
static u32 __init __attribute_const__
xorshift(u32 seed)
{
	seed ^= seed << 13;
	seed ^= seed >> 17;
	seed ^= seed << 5;
	return seed;
}

/* Given a non-zero x, returns a non-zero byte. */
static u8 __init __attribute_const__
mod255(u32 x)
{
	x = (x & 0xffff) + (x >> 16);	/* 1 <= x <= 0x1fffe */
	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x2fd */
	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x100 */
	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0xff */
	return x;
}

/* Fill the buffer with non-zero bytes. */
static void __init
fill_buf(char *buf, size_t len, u32 seed)
{
	size_t i;

	for (i = 0; i < len; i++) {
		seed = xorshift(seed);
		buf[i] = mod255(seed);
	}
}

/*
 * Test the various integer hash functions.  h64 (or its low-order bits)
 * is the integer to hash.  hash_or accumulates the OR of the hash values,
 * which are later checked to see that they cover all the requested bits.
 *
 * Because these functions (as opposed to the string hashes) are all
 * inline, the code being tested is actually in the module, and you can
 * recompile and re-test the module without rebooting.
 */
static bool __init
test_int_hash(unsigned long long h64, u32 hash_or[2][33])
{
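	/*
	 * hash_or[0][0] accumulates __hash_32() results and hash_or[1][0]
	 * __hash_32_generic() results (when built); for k = 1..32,
	 * hash_or[0][k] and hash_or[1][k] accumulate the k-bit hash_32()
	 * and hash_64() results respectively.
	 */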
	int k;
	u32 h0 = (u32)h64, h1, h2;

	/* Test __hash_32 */
	hash_or[0][0] |= h1 = __hash_32(h0);
#ifdef HAVE_ARCH__HASH_32
	hash_or[1][0] |= h2 = __hash_32_generic(h0);
#if HAVE_ARCH__HASH_32 == 1
	if (h1 != h2) {
		pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
			h0, h1, h2);
		return false;
	}
#endif
#endif

	/* Test k = 1..32 bits */
	for (k = 1; k <= 32; k++) {
		u32 const m = ((u32)2 << (k-1)) - 1;	/* Low k bits set */

		/* Test hash_32 */
		hash_or[0][k] |= h1 = hash_32(h0, k);
		if (h1 > m) {
			pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
			return false;
		}
#ifdef HAVE_ARCH_HASH_32
		h2 = hash_32_generic(h0, k);
#if HAVE_ARCH_HASH_32 == 1
		if (h1 != h2) {
			pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
				" = %#x", h0, k, h1, h2);
			return false;
		}
#else
		if (h2 > m) {
			pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
				h0, k, h1, m);
			return false;
		}
#endif
#endif
		/* Test hash_64 */
		hash_or[1][k] |= h1 = hash_64(h64, k);
		if (h1 > m) {
			pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
			return false;
		}
#ifdef HAVE_ARCH_HASH_64
		h2 = hash_64_generic(h64, k);
#if HAVE_ARCH_HASH_64 == 1
		if (h1 != h2) {
			pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
				"= %#x", h64, k, h1, h2);
			return false;
		}
#else
		if (h2 > m) {
			pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
				h64, k, h1, m);
			return false;
		}
#endif
#endif
	}

	(void)h2;	/* Suppress unused variable warning */
	return true;
}

#define SIZE 256	/* Run time is cubic in SIZE */

static int __init
test_hash_init(void)
{
	char buf[SIZE+1];
	u32 string_or = 0, hash_or[2][33] = { { 0, } };
	unsigned tests = 0;
	unsigned long long h64 = 0;
	int i, j;

	fill_buf(buf, SIZE, 1);

	/* Test every possible non-empty substring in the buffer. */
	for (j = SIZE; j > 0; --j) {
		buf[j] = '\0';

		for (i = 0; i <= j; i++) {
			u64 hashlen = hashlen_string(buf+i, buf+i);
			u32 h0 = full_name_hash(buf+i, buf+i, j-i);

			/* Check that hashlen_string gets the length right */
			if (hashlen_len(hashlen) != j-i) {
				pr_err("hashlen_string(%d..%d) returned length"
					" %u, expected %d",
					i, j, hashlen_len(hashlen), j-i);
				return -EINVAL;
			}
			/* Check that the hashes match */
			if (hashlen_hash(hashlen) != h0) {
				pr_err("hashlen_string(%d..%d) = %08x != "
					"full_name_hash() = %08x",
					i, j, hashlen_hash(hashlen), h0);
				return -EINVAL;
			}

			string_or |= h0;
			h64 = h64 << 32 | h0;	/* For use with hash_64 */
			if (!test_int_hash(h64, hash_or))
				return -EINVAL;
			tests++;
		} /* i */
	} /* j */

	/* The OR of all the hash values should cover all the bits */
	if (~string_or) {
		pr_err("OR of all string hash results = %#x != %#x",
			string_or, -1u);
		return -EINVAL;
	}
	if (~hash_or[0][0]) {
		pr_err("OR of all __hash_32 results = %#x != %#x",
			hash_or[0][0], -1u);
		return -EINVAL;
	}
#ifdef HAVE_ARCH__HASH_32
#if HAVE_ARCH__HASH_32 != 1	/* Test is pointless if results match */
	if (~hash_or[1][0]) {
		pr_err("OR of all __hash_32_generic results = %#x != %#x",
			hash_or[1][0], -1u);
		return -EINVAL;
	}
#endif
#endif

	/* Likewise for all the i-bit hash values */
	for (i = 1; i <= 32; i++) {
		u32 const m = ((u32)2 << (i-1)) - 1;	/* Low i bits set */

		if (hash_or[0][i] != m) {
			pr_err("OR of all hash_32(%d) results = %#x "
				"(%#x expected)", i, hash_or[0][i], m);
			return -EINVAL;
		}
		if (hash_or[1][i] != m) {
			pr_err("OR of all hash_64(%d) results = %#x "
				"(%#x expected)", i, hash_or[1][i], m);
			return -EINVAL;
		}
	}

	/* Issue notices about skipped tests. */
#ifdef HAVE_ARCH__HASH_32
#if HAVE_ARCH__HASH_32 != 1
	pr_info("__hash_32() is arch-specific; not compared to generic.");
#endif
#else
	pr_info("__hash_32() has no arch implementation to test.");
#endif
#ifdef HAVE_ARCH_HASH_32
#if HAVE_ARCH_HASH_32 != 1
	pr_info("hash_32() is arch-specific; not compared to generic.");
#endif
#else
	pr_info("hash_32() has no arch implementation to test.");
#endif
#ifdef HAVE_ARCH_HASH_64
#if HAVE_ARCH_HASH_64 != 1
	pr_info("hash_64() is arch-specific; not compared to generic.");
#endif
#else
	pr_info("hash_64() has no arch implementation to test.");
#endif

	pr_notice("%u tests passed.", tests);

	return 0;
}

static void __exit test_hash_exit(void)
{
}

module_init(test_hash_init);	/* Does everything */
module_exit(test_hash_exit);	/* Does nothing */

MODULE_LICENSE("GPL");