Revision 70feee0e1ef331b22cc51f383d532a0d043fbdcc authored by Yisheng Xie on 02 June 2017, 21:46:43 UTC, committed by Linus Torvalds on 02 June 2017, 22:07:38 UTC
Kefeng reported that when running the following test, the Mlocked count
in /proc/meminfo increases permanently:

 [1] testcase
 linux:~ # cat test_mlockall
 grep Mlocked /proc/meminfo
 for j in `seq 0 10`
 do
 	for i in `seq 4 15`
 	do
 		./p_mlockall >> log &
 	done
 	sleep 0.2
 done
 # wait some time to let the mlock counter decrease; 5s may not be enough
 sleep 5
 grep Mlocked /proc/meminfo

 linux:~ # cat p_mlockall.c
 #include <sys/mman.h>
 #include <stdlib.h>
 #include <stdio.h>

 #define SPACE_LEN	4096

 int main(int argc, char **argv)
 {
 	int ret;
 	void *adr = malloc(SPACE_LEN);

 	if (!adr)
 		return -1;

 	/* lock all current and future mappings, then immediately unlock them */
 	ret = mlockall(MCL_CURRENT | MCL_FUTURE);
 	printf("mlockall ret = %d\n", ret);

 	ret = munlockall();
 	printf("munlockall ret = %d\n", ret);

 	free(adr);
 	return 0;
 }
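
The reproducer can be built with any C compiler, e.g. "gcc -o p_mlockall
p_mlockall.c" (invocation assumed), and is started many times in parallel
by the script above so that munlockall() executes while pages may still
sit on another cpu's percpu pagevec, which is the race described below.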

In __munlock_pagevec() we should decrement NR_MLOCK for each page where
we clear the PageMlocked flag.  Commit 1ebb7cc6a583 ("mm: munlock: batch
NR_MLOCK zone state updates") introduced a bug where we don't decrement
NR_MLOCK for pages where we clear the flag but fail to isolate them from
the LRU list (e.g. when the pages are on some other cpu's percpu
pagevec).  Since PageMlocked stays cleared, the NR_MLOCK accounting gets
permanently disrupted by this.

Fix it by counting the number of pages whose PageMlocked flag is cleared.
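
A minimal sketch of that counting approach (illustrative local names and
a simplified loop, not the exact mm/mlock.c hunk): decrement NR_MLOCK by
the number of pages whose PageMlocked flag was actually cleared, rather
than by the number of pages successfully isolated:

 	/*
 	 * Sketch only: simplified __munlock_pagevec()-style loop;
 	 * "munlocked" is an illustrative local variable.
 	 */
 	int i, munlocked = 0;

 	spin_lock_irq(zone_lru_lock(zone));
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];

 		/*
 		 * Count every page whose flag we clear, even if LRU
 		 * isolation later fails (e.g. the page still sits on
 		 * another cpu's percpu pagevec).
 		 */
 		if (TestClearPageMlocked(page))
 			munlocked++;
 	}
 	__mod_zone_page_state(zone, NR_MLOCK, -munlocked);
 	spin_unlock_irq(zone_lru_lock(zone));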

Fixes: 1ebb7cc6a583 ("mm: munlock: batch NR_MLOCK zone state updates")
Link: http://lkml.kernel.org/r/1495678405-54569-1-git-send-email-xieyisheng1@huawei.com
Signed-off-by: Yisheng Xie <xieyisheng1@huawei.com>
Reported-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Tested-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Joern Engel <joern@logfs.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michel Lespinasse <walken@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: zhongjiang <zhongjiang@huawei.com>
Cc: Hanjun Guo <guohanjun@huawei.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Kconfig.locks
#
# The ARCH_INLINE foo is necessary because select ignores "depends on"
#
config ARCH_INLINE_SPIN_TRYLOCK
	bool

config ARCH_INLINE_SPIN_TRYLOCK_BH
	bool

config ARCH_INLINE_SPIN_LOCK
	bool

config ARCH_INLINE_SPIN_LOCK_BH
	bool

config ARCH_INLINE_SPIN_LOCK_IRQ
	bool

config ARCH_INLINE_SPIN_LOCK_IRQSAVE
	bool

config ARCH_INLINE_SPIN_UNLOCK
	bool

config ARCH_INLINE_SPIN_UNLOCK_BH
	bool

config ARCH_INLINE_SPIN_UNLOCK_IRQ
	bool

config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
	bool


config ARCH_INLINE_READ_TRYLOCK
	bool

config ARCH_INLINE_READ_LOCK
	bool

config ARCH_INLINE_READ_LOCK_BH
	bool

config ARCH_INLINE_READ_LOCK_IRQ
	bool

config ARCH_INLINE_READ_LOCK_IRQSAVE
	bool

config ARCH_INLINE_READ_UNLOCK
	bool

config ARCH_INLINE_READ_UNLOCK_BH
	bool

config ARCH_INLINE_READ_UNLOCK_IRQ
	bool

config ARCH_INLINE_READ_UNLOCK_IRQRESTORE
	bool


config ARCH_INLINE_WRITE_TRYLOCK
	bool

config ARCH_INLINE_WRITE_LOCK
	bool

config ARCH_INLINE_WRITE_LOCK_BH
	bool

config ARCH_INLINE_WRITE_LOCK_IRQ
	bool

config ARCH_INLINE_WRITE_LOCK_IRQSAVE
	bool

config ARCH_INLINE_WRITE_UNLOCK
	bool

config ARCH_INLINE_WRITE_UNLOCK_BH
	bool

config ARCH_INLINE_WRITE_UNLOCK_IRQ
	bool

config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
	bool

config UNINLINE_SPIN_UNLOCK
	bool

#
# lock_* functions are inlined when:
#   - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
#
# trylock_* functions are inlined when:
#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
#
# unlock and unlock_irq functions are inlined when:
#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
#  or
#   - DEBUG_SPINLOCK=n and PREEMPT=n
#
# unlock_bh and unlock_irqrestore functions are inlined when:
#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
#
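#
# Illustrative example (hypothetical symbol name): an architecture that
# wants, say, spin_unlock_irq() inlined selects the matching ARCH_INLINE_*
# option from its own Kconfig:
#
#	config HYPOTHETICAL_ARCH
#		select ARCH_INLINE_SPIN_UNLOCK_IRQ
#
# With DEBUG_SPINLOCK=n, INLINE_SPIN_UNLOCK_IRQ below then defaults to y.
#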

if !DEBUG_SPINLOCK

config INLINE_SPIN_TRYLOCK
	def_bool y
	depends on ARCH_INLINE_SPIN_TRYLOCK

config INLINE_SPIN_TRYLOCK_BH
	def_bool y
	depends on ARCH_INLINE_SPIN_TRYLOCK_BH

config INLINE_SPIN_LOCK
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK

config INLINE_SPIN_LOCK_BH
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_BH

config INLINE_SPIN_LOCK_IRQ
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQ

config INLINE_SPIN_LOCK_IRQSAVE
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQSAVE

config INLINE_SPIN_UNLOCK_BH
	def_bool y
	depends on ARCH_INLINE_SPIN_UNLOCK_BH

config INLINE_SPIN_UNLOCK_IRQ
	def_bool y
	depends on !PREEMPT || ARCH_INLINE_SPIN_UNLOCK_IRQ

config INLINE_SPIN_UNLOCK_IRQRESTORE
	def_bool y
	depends on ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE


config INLINE_READ_TRYLOCK
	def_bool y
	depends on ARCH_INLINE_READ_TRYLOCK

config INLINE_READ_LOCK
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK

config INLINE_READ_LOCK_BH
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_BH

config INLINE_READ_LOCK_IRQ
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQ

config INLINE_READ_LOCK_IRQSAVE
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQSAVE

config INLINE_READ_UNLOCK
	def_bool y
	depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK

config INLINE_READ_UNLOCK_BH
	def_bool y
	depends on ARCH_INLINE_READ_UNLOCK_BH

config INLINE_READ_UNLOCK_IRQ
	def_bool y
	depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK_IRQ

config INLINE_READ_UNLOCK_IRQRESTORE
	def_bool y
	depends on ARCH_INLINE_READ_UNLOCK_IRQRESTORE


config INLINE_WRITE_TRYLOCK
	def_bool y
	depends on ARCH_INLINE_WRITE_TRYLOCK

config INLINE_WRITE_LOCK
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK

config INLINE_WRITE_LOCK_BH
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_BH

config INLINE_WRITE_LOCK_IRQ
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQ

config INLINE_WRITE_LOCK_IRQSAVE
	def_bool y
	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQSAVE

config INLINE_WRITE_UNLOCK
	def_bool y
	depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK

config INLINE_WRITE_UNLOCK_BH
	def_bool y
	depends on ARCH_INLINE_WRITE_UNLOCK_BH

config INLINE_WRITE_UNLOCK_IRQ
	def_bool y
	depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK_IRQ

config INLINE_WRITE_UNLOCK_IRQRESTORE
	def_bool y
	depends on ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE

endif

config ARCH_SUPPORTS_ATOMIC_RMW
	bool

config MUTEX_SPIN_ON_OWNER
	def_bool y
	depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW

config RWSEM_SPIN_ON_OWNER
       def_bool y
       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW

config LOCK_SPIN_ON_OWNER
       def_bool y
       depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER

config ARCH_USE_QUEUED_SPINLOCKS
	bool

config QUEUED_SPINLOCKS
	def_bool y if ARCH_USE_QUEUED_SPINLOCKS
	depends on SMP

config ARCH_USE_QUEUED_RWLOCKS
	bool

config QUEUED_RWLOCKS
	def_bool y if ARCH_USE_QUEUED_RWLOCKS
	depends on SMP