Revision 85bd839983778fcd0c1c043327b14a046e979b39 authored by Gu Zheng on 10 June 2015, 18:14:43 UTC, committed by Linus Torvalds on 10 June 2015, 23:43:43 UTC
Izumi found the following oops when hot re-adding a node:

    BUG: unable to handle kernel paging request at ffffc90008963690
    IP: __wake_up_bit+0x20/0x70
    Oops: 0000 [#1] SMP
    CPU: 68 PID: 1237 Comm: rs:main Q:Reg Not tainted 4.1.0-rc5 #80
    Hardware name: FUJITSU PRIMEQUEST2800E/SB, BIOS PRIMEQUEST 2000 Series BIOS Version 1.87 04/28/2015
    task: ffff880838df8000 ti: ffff880017b94000 task.ti: ffff880017b94000
    RIP: 0010:[<ffffffff810dff80>]  [<ffffffff810dff80>] __wake_up_bit+0x20/0x70
    RSP: 0018:ffff880017b97be8  EFLAGS: 00010246
    RAX: ffffc90008963690 RBX: 00000000003c0000 RCX: 000000000000a4c9
    RDX: 0000000000000000 RSI: ffffea101bffd500 RDI: ffffc90008963648
    RBP: ffff880017b97c08 R08: 0000000002000020 R09: 0000000000000000
    R10: 0000000000000000 R11: 0000000000000000 R12: ffff8a0797c73800
    R13: ffffea101bffd500 R14: 0000000000000001 R15: 00000000003c0000
    FS:  00007fcc7ffff700(0000) GS:ffff880874800000(0000) knlGS:0000000000000000
    CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
    CR2: ffffc90008963690 CR3: 0000000836761000 CR4: 00000000001407e0
    Call Trace:
      unlock_page+0x6d/0x70
      generic_write_end+0x53/0xb0
      xfs_vm_write_end+0x29/0x80 [xfs]
      generic_perform_write+0x10a/0x1e0
      xfs_file_buffered_aio_write+0x14d/0x3e0 [xfs]
      xfs_file_write_iter+0x79/0x120 [xfs]
      __vfs_write+0xd4/0x110
      vfs_write+0xac/0x1c0
      SyS_write+0x58/0xd0
      system_call_fastpath+0x12/0x76
    Code: 5d c3 66 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 55 48 89 e5 48 83 ec 20 65 48 8b 04 25 28 00 00 00 48 89 45 f8 31 c0 48 8d 47 48 <48> 39 47 48 48 c7 45 e8 00 00 00 00 48 c7 45 f0 00 00 00 00 48
    RIP  [<ffffffff810dff80>] __wake_up_bit+0x20/0x70
     RSP <ffff880017b97be8>
    CR2: ffffc90008963690

Reproduce method (re-add a node):
  Hot-add nodeA --> remove nodeA --> hot-add nodeA (panic)

This seems to be a use-after-free problem, and the root cause is that
zone->wait_table was not set to *NULL* after it was freed in
try_offline_node.

When a node is hot re-added, its pgdat is reused, and so is the zone
struct.  When pages are added to the target zone, the zone is initialized
first (including the wait_table) if it has not been initialized yet.
Whether the zone is initialized is judged by zone->wait_table:

	static inline bool zone_is_initialized(struct zone *zone)
	{
		return !!zone->wait_table;
	}

so if zone->wait_table is not set to *NULL* after it is freed, the memory
hotplug routine will skip initializing the zone when the node is hot
re-added, and wait_table will still point to the freed memory.  We then
access that invalid address when trying to wake up the waiters once the
i/o operation on the page is done, as in the oops above.
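
As an illustration only (a minimal sketch of the idea described above, not
necessarily the exact patch): once try_offline_node() has freed a zone's
wait table, clearing the pointer makes zone_is_initialized() report the
zone as uninitialized again, so a later hot re-add rebuilds the wait table
instead of reusing freed memory.  The loop below and the surrounding
offline checks are simplified assumptions; details such as only freeing
vmalloc()ed tables are omitted:

	/* Simplified sketch of try_offline_node(); everything beyond the
	 * wait_table handling is an assumption, not the upstream code. */
	void try_offline_node(int nid)
	{
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;

		/* ... verify the node has no memory or CPUs left online ... */

		for (zone = pgdat->node_zones;
		     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
			if (!zone->wait_table)
				continue;
			vfree(zone->wait_table);
			/* make zone_is_initialized() false for a future re-add */
			zone->wait_table = NULL;
		}
	}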

Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Reported-by: Taku Izumi <izumi.taku@jp.fujitsu.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 5879ae5
cpuidle-cps.c
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
#include <linux/init.h>

#include <asm/idle.h>
#include <asm/pm-cps.h>

/* Enumeration of the various idle states this driver may enter */
enum cps_idle_state {
	STATE_WAIT = 0,		/* MIPS wait instruction, coherent */
	STATE_NC_WAIT,		/* MIPS wait instruction, non-coherent */
	STATE_CLOCK_GATED,	/* Core clock gated */
	STATE_POWER_GATED,	/* Core power gated */
	STATE_COUNT
};

static int cps_nc_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	enum cps_pm_state pm_state;
	int err;

	/*
	 * At least one core must remain powered up & clocked in order for the
	 * system to have any hope of functioning.
	 *
	 * TODO: don't treat core 0 specially, just prevent the final core
	 * TODO: remap interrupt affinity temporarily
	 */
	if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT))
		index = STATE_NC_WAIT;

	/* Select the appropriate cps_pm_state */
	switch (index) {
	case STATE_NC_WAIT:
		pm_state = CPS_PM_NC_WAIT;
		break;
	case STATE_CLOCK_GATED:
		pm_state = CPS_PM_CLOCK_GATED;
		break;
	case STATE_POWER_GATED:
		pm_state = CPS_PM_POWER_GATED;
		break;
	default:
		BUG();
		return -EINVAL;
	}

	/* Notify listeners the CPU is about to power down */
	if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter())
		return -EINTR;

	/* Enter that state */
	err = cps_pm_enter_state(pm_state);

	/* Notify listeners the CPU is back up */
	if (pm_state == CPS_PM_POWER_GATED)
		cpu_pm_exit();

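	/* on success, return the index of the state actually entered */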
	return err ?: index;
}

static struct cpuidle_driver cps_driver = {
	.name			= "cpc_cpuidle",
	.owner			= THIS_MODULE,
	.states = {
		[STATE_WAIT] = MIPS_CPUIDLE_WAIT_STATE,
		[STATE_NC_WAIT] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 200,
			.target_residency	= 450,
			.name	= "nc-wait",
			.desc	= "non-coherent MIPS wait",
		},
		[STATE_CLOCK_GATED] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 300,
			.target_residency	= 700,
			.flags	= CPUIDLE_FLAG_TIMER_STOP,
			.name	= "clock-gated",
			.desc	= "core clock gated",
		},
		[STATE_POWER_GATED] = {
			.enter	= cps_nc_enter,
			.exit_latency		= 600,
			.target_residency	= 1000,
			.flags	= CPUIDLE_FLAG_TIMER_STOP,
			.name	= "power-gated",
			.desc	= "core power gated",
		},
	},
	.state_count		= STATE_COUNT,
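	/* STATE_WAIT is always safe to enter, with no cross-CPU coordination */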
	.safe_state_index	= 0,
};

static void __init cps_cpuidle_unregister(void)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_possible_cpu(cpu) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(&cps_driver);
}

static int __init cps_cpuidle_init(void)
{
	int err, cpu, core, i;
	struct cpuidle_device *device;

	/* Detect supported states */
	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		cps_driver.state_count = STATE_CLOCK_GATED + 1;
	if (!cps_pm_support_state(CPS_PM_CLOCK_GATED))
		cps_driver.state_count = STATE_NC_WAIT + 1;
	if (!cps_pm_support_state(CPS_PM_NC_WAIT))
		cps_driver.state_count = STATE_WAIT + 1;

	/* Inform the user if some states are unavailable */
	if (cps_driver.state_count < STATE_COUNT) {
		pr_info("cpuidle-cps: limited to ");
		switch (cps_driver.state_count - 1) {
		case STATE_WAIT:
			pr_cont("coherent wait\n");
			break;
		case STATE_NC_WAIT:
			pr_cont("non-coherent wait\n");
			break;
		case STATE_CLOCK_GATED:
			pr_cont("clock gating\n");
			break;
		}
	}

	/*
	 * Set the coupled flag on the appropriate states if this system
	 * requires it.
	 */
	if (coupled_coherence)
		for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++)
			cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED;

	err = cpuidle_register_driver(&cps_driver);
	if (err) {
		pr_err("Failed to register CPS cpuidle driver\n");
		return err;
	}

	for_each_possible_cpu(cpu) {
		core = cpu_data[cpu].core;
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;
#ifdef CONFIG_MIPS_MT
		cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
#endif

		err = cpuidle_register_device(device);
		if (err) {
			pr_err("Failed to register CPU%d cpuidle device\n",
			       cpu);
			goto err_out;
		}
	}

	return 0;
err_out:
	cps_cpuidle_unregister();
	return err;
}
device_initcall(cps_cpuidle_init);