Revision 49054556289e8787501630b7c7a9d407da02e296 authored by Paolo Abeni on 29 September 2021, 09:59:17 UTC, committed by David S. Miller on 30 September 2021, 12:06:47 UTC
Syzkaller reported a false positive deadlock involving
the nl socket lock and the subflow socket lock:

MPTCP: kernel_bind error, err=-98
============================================
WARNING: possible recursive locking detected
5.15.0-rc1-syzkaller #0 Not tainted
--------------------------------------------
syz-executor998/6520 is trying to acquire lock:
ffff8880795718a0 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: mptcp_close+0x267/0x7b0 net/mptcp/protocol.c:2738

but task is already holding lock:
ffff8880787c8c60 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1612 [inline]
ffff8880787c8c60 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: mptcp_close+0x23/0x7b0 net/mptcp/protocol.c:2720

other info that might help us debug this:
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(k-sk_lock-AF_INET);
  lock(k-sk_lock-AF_INET);

 *** DEADLOCK ***

 May be due to missing lock nesting notation

3 locks held by syz-executor998/6520:
 #0: ffffffff8d176c50 (cb_lock){++++}-{3:3}, at: genl_rcv+0x15/0x40 net/netlink/genetlink.c:802
 #1: ffffffff8d176d08 (genl_mutex){+.+.}-{3:3}, at: genl_lock net/netlink/genetlink.c:33 [inline]
 #1: ffffffff8d176d08 (genl_mutex){+.+.}-{3:3}, at: genl_rcv_msg+0x3e0/0x580 net/netlink/genetlink.c:790
 #2: ffff8880787c8c60 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1612 [inline]
 #2: ffff8880787c8c60 (k-sk_lock-AF_INET){+.+.}-{0:0}, at: mptcp_close+0x23/0x7b0 net/mptcp/protocol.c:2720

stack backtrace:
CPU: 1 PID: 6520 Comm: syz-executor998 Not tainted 5.15.0-rc1-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
 print_deadlock_bug kernel/locking/lockdep.c:2944 [inline]
 check_deadlock kernel/locking/lockdep.c:2987 [inline]
 validate_chain kernel/locking/lockdep.c:3776 [inline]
 __lock_acquire.cold+0x149/0x3ab kernel/locking/lockdep.c:5015
 lock_acquire kernel/locking/lockdep.c:5625 [inline]
 lock_acquire+0x1ab/0x510 kernel/locking/lockdep.c:5590
 lock_sock_fast+0x36/0x100 net/core/sock.c:3229
 mptcp_close+0x267/0x7b0 net/mptcp/protocol.c:2738
 inet_release+0x12e/0x280 net/ipv4/af_inet.c:431
 __sock_release net/socket.c:649 [inline]
 sock_release+0x87/0x1b0 net/socket.c:677
 mptcp_pm_nl_create_listen_socket+0x238/0x2c0 net/mptcp/pm_netlink.c:900
 mptcp_nl_cmd_add_addr+0x359/0x930 net/mptcp/pm_netlink.c:1170
 genl_family_rcv_msg_doit+0x228/0x320 net/netlink/genetlink.c:731
 genl_family_rcv_msg net/netlink/genetlink.c:775 [inline]
 genl_rcv_msg+0x328/0x580 net/netlink/genetlink.c:792
 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2504
 genl_rcv+0x24/0x40 net/netlink/genetlink.c:803
 netlink_unicast_kernel net/netlink/af_netlink.c:1314 [inline]
 netlink_unicast+0x533/0x7d0 net/netlink/af_netlink.c:1340
 netlink_sendmsg+0x86d/0xdb0 net/netlink/af_netlink.c:1929
 sock_sendmsg_nosec net/socket.c:704 [inline]
 sock_sendmsg+0xcf/0x120 net/socket.c:724
 sock_no_sendpage+0x101/0x150 net/core/sock.c:2980
 kernel_sendpage.part.0+0x1a0/0x340 net/socket.c:3504
 kernel_sendpage net/socket.c:3501 [inline]
 sock_sendpage+0xe5/0x140 net/socket.c:1003
 pipe_to_sendpage+0x2ad/0x380 fs/splice.c:364
 splice_from_pipe_feed fs/splice.c:418 [inline]
 __splice_from_pipe+0x43e/0x8a0 fs/splice.c:562
 splice_from_pipe fs/splice.c:597 [inline]
 generic_splice_sendpage+0xd4/0x140 fs/splice.c:746
 do_splice_from fs/splice.c:767 [inline]
 direct_splice_actor+0x110/0x180 fs/splice.c:936
 splice_direct_to_actor+0x34b/0x8c0 fs/splice.c:891
 do_splice_direct+0x1b3/0x280 fs/splice.c:979
 do_sendfile+0xae9/0x1240 fs/read_write.c:1249
 __do_sys_sendfile64 fs/read_write.c:1314 [inline]
 __se_sys_sendfile64 fs/read_write.c:1300 [inline]
 __x64_sys_sendfile64+0x1cc/0x210 fs/read_write.c:1300
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f215cb69969
Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 14 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 c0 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007ffc96bb3868 EFLAGS: 00000246 ORIG_RAX: 0000000000000028
RAX: ffffffffffffffda RBX: 00007f215cbad072 RCX: 00007f215cb69969
RDX: 0000000000000000 RSI: 0000000000000004 RDI: 0000000000000005
RBP: 0000000000000000 R08: 00007ffc96bb3a08 R09: 00007ffc96bb3a08
R10: 0000000100000002 R11: 0000000000000246 R12: 00007ffc96bb387c
R13: 431bde82d7b634db R14: 0000000000000000 R15: 0000000000000000

The problem originates from an incorrect lock annotation in the MPTCP
code and is only visible since commit 2dcb96bacce3 ("net: core: Correct
the sock::sk_lock.owned lockdep annotations"), but it has been present
since the initial implementation of port-based endpoint support.

This patch addresses the issue by introducing a nested variant of
lock_sock_fast() and using it in the relevant code path.
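
For reference, a minimal sketch of what such a nested variant could
look like (the name lock_sock_fast_nested() and the split around a
__lock_sock_fast() helper are assumptions here, modeled on the
existing lock_sock_nested() annotation):

  /* Sketch only: mark the acquisition as one nesting level deep so
   * lockdep accepts taking the subflow lock while the MPTCP socket
   * lock is already held. */
  static inline bool lock_sock_fast_nested(struct sock *sk)
  {
          mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING,
                        0, _RET_IP_);
          return __lock_sock_fast(sk);
  }

The relevant MPTCP call site would then take the subflow lock via this
helper instead of plain lock_sock_fast().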

Fixes: 1729cf186d8a ("mptcp: create the listening socket for new port")
Fixes: 2dcb96bacce3 ("net: core: Correct the sock::sk_lock.owned lockdep annotations")
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Reported-and-tested-by: syzbot+1dd53f7a89b299d59eaf@syzkaller.appspotmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
sse2.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/sse2.c
 *
 * SSE-2 implementation of RAID-6 syndrome functions
 *
 */

#include <linux/raid/pq.h>
#include "x86.h"

static const struct raid6_sse_constants {
	u64 x1d[2];
} raid6_sse_constants __attribute__((aligned(16))) = {
	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
};

static int raid6_have_sse2(void)
{
	/* Not really boot_cpu but "all_cpus" */
	return boot_cpu_has(X86_FEATURE_MMX) &&
		boot_cpu_has(X86_FEATURE_FXSR) &&
		boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2);
}

/*
 * Plain SSE2 implementation
 */
static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
		asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
		for ( z = z0-2 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm6,%xmm2");
			asm volatile("pxor %xmm6,%xmm4");
			asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
		}
		asm volatile("pcmpgtb %xmm4,%xmm5");
		asm volatile("paddb %xmm4,%xmm4");
		asm volatile("pand %xmm0,%xmm5");
		asm volatile("pxor %xmm5,%xmm4");
		asm volatile("pxor %xmm5,%xmm5");
		asm volatile("pxor %xmm6,%xmm2");
		asm volatile("pxor %xmm6,%xmm4");

		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}


static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
		asm volatile("pxor %xmm4,%xmm2");
		/* P/Q data pages */
		for ( z = z0-1 ; z >= start ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm5,%xmm4");
		}
		/* P/Q left side optimization */
		for ( z = start-1 ; z >= 0 ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
		}
		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
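
/*
 * Algorithm descriptor; at boot, raid6_select_algo() in lib/raid6/algos.c
 * benchmarks every entry whose ->valid() hook reports support and picks
 * the fastest.
 */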

const struct raid6_calls raid6_sse2x1 = {
	raid6_sse21_gen_syndrome,
	raid6_sse21_xor_syndrome,
	raid6_have_sse2,
	"sse2x1",
	1			/* Has cache hints */
};

/*
 * Unrolled-by-2 SSE2 implementation
 */
static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
	asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */

	/* We uniformly assume a single prefetch covers at least 32 bytes */
	for ( d = 0 ; d < bytes ; d += 32 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d]));    /* P[0] */
		asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
		asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
		asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));

	for ( d = 0 ; d < bytes ; d += 32 ) {
		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
		asm volatile("pxor %xmm4,%xmm2");
		asm volatile("pxor %xmm6,%xmm3");
		/* P/Q data pages */
		for ( z = z0-1 ; z >= start ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
		}
		/* P/Q left side optimization */
		for ( z = start-1 ; z >= 0 ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
		}
		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse2x2 = {
	raid6_sse22_gen_syndrome,
	raid6_sse22_xor_syndrome,
	raid6_have_sse2,
	"sse2x2",
	1			/* Has cache hints */
};

#ifdef CONFIG_X86_64

/*
 * Unrolled-by-4 SSE2 implementation
 */
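/*
 * Uses xmm8-xmm15, which exist only in 64-bit mode, hence the
 * CONFIG_X86_64 guard.
 */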
static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm2,%xmm2");	/* P[0] */
	asm volatile("pxor %xmm3,%xmm3");	/* P[1] */
	asm volatile("pxor %xmm4,%xmm4"); 	/* Q[0] */
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */
	asm volatile("pxor %xmm6,%xmm6"); 	/* Q[1] */
	asm volatile("pxor %xmm7,%xmm7"); 	/* Zero temp */
	asm volatile("pxor %xmm10,%xmm10");	/* P[2] */
	asm volatile("pxor %xmm11,%xmm11");	/* P[3] */
	asm volatile("pxor %xmm12,%xmm12"); 	/* Q[2] */
	asm volatile("pxor %xmm13,%xmm13");	/* Zero temp */
	asm volatile("pxor %xmm14,%xmm14"); 	/* Q[3] */
	asm volatile("pxor %xmm15,%xmm15"); 	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 64 ) {
		for ( z = z0 ; z >= 0 ; z-- ) {
			/* The second prefetch seems to improve performance... */
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm13,%xmm10");
			asm volatile("pxor %xmm15,%xmm11");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("pxor %xmm3,%xmm3");
		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
		asm volatile("pxor %xmm10,%xmm10");
		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
		asm volatile("pxor %xmm11,%xmm11");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("pxor %xmm6,%xmm6");
		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
		asm volatile("pxor %xmm12,%xmm12");
		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
		asm volatile("pxor %xmm14,%xmm14");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));

	for ( d = 0 ; d < bytes ; d += 64 ) {
		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
		asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d+32]));
		asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d+48]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
		asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
		asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
		asm volatile("pxor %xmm4,%xmm2");
		asm volatile("pxor %xmm6,%xmm3");
		asm volatile("pxor %xmm12,%xmm10");
		asm volatile("pxor %xmm14,%xmm11");
		/* P/Q data pages */
		for ( z = z0-1 ; z >= start ; z-- ) {
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm13,%xmm10");
			asm volatile("pxor %xmm15,%xmm11");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
		}
		asm volatile("prefetchnta %0" :: "m" (q[d]));
		asm volatile("prefetchnta %0" :: "m" (q[d+32]));
		/* P/Q left side optimization */
		for ( z = start-1 ; z >= 0 ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
		asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
		asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
	}
	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}


const struct raid6_calls raid6_sse2x4 = {
	raid6_sse24_gen_syndrome,
	raid6_sse24_xor_syndrome,
	raid6_have_sse2,
	"sse2x4",
	1			/* Has cache hints */
};

#endif /* CONFIG_X86_64 */