Revision 9a291a7c9428155e8e623e4a3989f8be47134df5 authored by James Morse on 02 June 2017, 21:46:46 UTC, committed by Linus Torvalds on 02 June 2017, 22:07:38 UTC
KVM uses get_user_pages() to resolve its stage2 faults.  KVM sets the
FOLL_HWPOISON flag causing faultin_page() to return -EHWPOISON when it
finds a VM_FAULT_HWPOISON.  KVM handles these hwpoison pages as a
special case (see check_user_page_hwpoison()).

When huge pages are involved, this doesn't work so well.
get_user_pages() calls follow_hugetlb_page(), which stops early if it
receives VM_FAULT_HWPOISON from hugetlb_fault(), eventually returning
-EFAULT to the caller.  The step to map this to -EHWPOISON based on the
FOLL_ flags is missing.  The hwpoison special case is skipped, and
-EFAULT is returned to user-space, causing QEMU or kvmtool to exit.

Instead, move this VM_FAULT_ to errno mapping code into a header file
and use it from faultin_page() and follow_hugetlb_page().
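
The helper, vm_fault_to_errno() in include/linux/mm.h, looks roughly
like this (returning 0 when the fault flags carry no error):

static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}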

With this, KVM works as expected.

This isn't a problem for arm64 today as we haven't enabled
MEMORY_FAILURE, but I can't see any reason it wouldn't happen on x86
too, so this should be treated as a fix.  It doesn't apply earlier than
stable's v4.11.1 due to all sorts of cleanup.

[james.morse@arm.com: add vm_fault_to_errno() call to faultin_page(), as suggested]
  Link: http://lkml.kernel.org/r/20170525171035.16359-1-james.morse@arm.com
[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20170524160900.28786-1-james.morse@arm.com
Signed-off-by: James Morse <james.morse@arm.com>
Acked-by: Punit Agrawal <punit.agrawal@arm.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: <stable@vger.kernel.org>	[4.11.1+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
io.c
/*
 * Copyright (c) 2016 Trond Myklebust
 *
 * I/O and data path helper functionality.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/rwsem.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>

#include "internal.h"

/* Call with exclusively locked inode->i_rwsem */
static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
{
	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
		clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
		inode_dio_wait(inode);
	}
}

/**
 * nfs_start_io_read - declare the file is being used for buffered reads
 * @inode: file inode
 *
 * Declare that a buffered read operation is about to start, and ensure
 * that we block all direct I/O.
 * On exit, the function ensures that the NFS_INO_ODIRECT flag is unset,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that buffered read operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas direct I/O
 * operations need to wait to grab an exclusive lock in order to set
 * NFS_INO_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
 */
void
nfs_start_io_read(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	/* Be an optimist! */
	down_read(&inode->i_rwsem);
	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0)
		return;
	up_read(&inode->i_rwsem);
	/* Slow path.... */
	down_write(&inode->i_rwsem);
	nfs_block_o_direct(nfsi, inode);
	downgrade_write(&inode->i_rwsem);
}

/**
 * nfs_end_io_read - declare that the buffered read operation is done
 * @inode: file inode
 *
 * Declare that a buffered read operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void
nfs_end_io_read(struct inode *inode)
{
	up_read(&inode->i_rwsem);
}
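
/*
 * Illustrative sketch, not part of this file: a buffered read path
 * brackets the actual read with the two helpers above.  The function
 * name below is hypothetical; in NFS the real caller is the
 * read_iter file operation.
 */
#if 0	/* example only */
static ssize_t example_buffered_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t result;

	nfs_start_io_read(inode);	/* shared lock: O_DIRECT excluded */
	result = generic_file_read_iter(iocb, to);
	nfs_end_io_read(inode);		/* drop the shared lock */
	return result;
}
#endif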

/**
 * nfs_start_io_write - declare the file is being used for buffered writes
 * @inode: file inode
 *
 * Declare that a buffered write operation is about to start, and ensure
 * that we block all direct I/O.
 */
void
nfs_start_io_write(struct inode *inode)
{
	down_write(&inode->i_rwsem);
	nfs_block_o_direct(NFS_I(inode), inode);
}

/**
 * nfs_end_io_write - declare that the buffered write operation is done
 * @inode: file inode
 *
 * Declare that a buffered write operation is done, and release the
 * lock on inode->i_rwsem.
 */
void
nfs_end_io_write(struct inode *inode)
{
	up_write(&inode->i_rwsem);
}
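
/*
 * Illustrative sketch, not part of this file: a buffered write path
 * holds the lock exclusively for the whole write, so it serialises
 * against both buffered readers and O_DIRECT.  The function name is
 * hypothetical; generic_perform_write() matches this kernel's API.
 */
#if 0	/* example only */
static ssize_t example_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t result;

	nfs_start_io_write(inode);	/* exclusive lock: waits out O_DIRECT */
	result = generic_perform_write(file, from, iocb->ki_pos);
	nfs_end_io_write(inode);
	return result;
}
#endif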

/* Call with exclusively locked inode->i_rwsem */
static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
{
	if (!test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
		set_bit(NFS_INO_ODIRECT, &nfsi->flags);
		nfs_wb_all(inode);
	}
}

/**
 * nfs_start_io_direct - declare the file is being used for direct I/O
 * @inode: file inode
 *
 * Declare that a direct I/O operation is about to start, and ensure
 * that we block all buffered I/O.
 * On exit, the function ensures that the NFS_INO_ODIRECT flag is set,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that direct I/O operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas buffered I/O
 * operations need to wait to grab an exclusive lock in order to clear
 * NFS_INO_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
 */
void
nfs_start_io_direct(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	/* Be an optimist! */
	down_read(&inode->i_rwsem);
	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) != 0)
		return;
	up_read(&inode->i_rwsem);
	/* Slow path.... */
	down_write(&inode->i_rwsem);
	nfs_block_buffered(nfsi, inode);
	downgrade_write(&inode->i_rwsem);
}

/**
 * nfs_end_io_direct - declare that the direct I/O operation is done
 * @inode: file inode
 *
 * Declare that a direct I/O operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void
nfs_end_io_direct(struct inode *inode)
{
	up_read(&inode->i_rwsem);
}
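
/*
 * Illustrative sketch, not part of this file: a direct I/O path takes
 * the same shared lock, but with NFS_INO_ODIRECT set, so it excludes
 * buffered I/O instead of other O_DIRECT.  The function and the
 * commented transfer step are hypothetical placeholders.
 */
#if 0	/* example only */
static ssize_t example_direct_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t result;

	nfs_start_io_direct(inode);	/* shared lock: buffered I/O excluded */
	/* ... set up and wait for the uncached RPC transfer here ... */
	result = 0;			/* placeholder for the real transfer */
	nfs_end_io_direct(inode);
	return result;
}
#endif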