Revision 3e1a0699095803e53072699a4a1485af7744601d authored by Joe Thornber on 03 March 2014, 16:03:26 UTC, committed by Mike Snitzer on 05 March 2014, 20:26:58 UTC
Ideally a thin pool would never run out of data space; the low water
mark would trigger userland to extend the pool before we completely run
out of space.  However, many small random IOs to unprovisioned space can
consume data space at an alarming rate.  Adjust your low water mark if
you're frequently seeing "out-of-data-space" mode.
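
As a rough illustration of what the low-water-mark mechanism amounts to
(a sketch only; the struct fields and helper name here are assumptions,
not quoted from dm-thin), the pool raises a device-mapper event the
first time its free data blocks drop below the threshold so a userspace
monitor such as dmeventd can extend the data device:

  /* Sketch: raise a dm event once when free data blocks fall below the
   * configured low water mark; field names are illustrative only. */
  static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
  {
  	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
  		pool->low_water_triggered = true;
  		dm_table_event(pool->ti->table);  /* wake userspace to extend the pool */
  	}
  }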

Before this fix, if data space ran out the pool was put in
PM_READ_ONLY mode, which also aborted the pool's current metadata
transaction (losing any changes made in that transaction).  This had
the side-effect of needlessly compromising data consistency.  And
retrying queued unserviceable bios, once the data pool was resized,
could then modify potentially inconsistent pool metadata.

Now, when the pool's data space is exhausted, transition to a new pool
mode (PM_OUT_OF_DATA_SPACE) that allows metadata to be changed but does
not allow data to be allocated.  This allows users to remove thin
volumes or discard data to recover data space.
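
Purely as an illustrative sketch of the resulting mode set (not quoted
from drivers/md/dm-thin.c), the pool modes can be pictured as:

  enum pool_mode {
  	PM_WRITE,		/* metadata and data may be changed */
  	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, data may not be allocated */
  	PM_READ_ONLY,		/* metadata may not be changed */
  	PM_FAIL,		/* all I/O fails */
  };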

The pool is no longer put in PM_READ_ONLY mode in response to the pool
running out of data space.  And PM_READ_ONLY mode no longer aborts the
pool's current metadata transaction.  Also, set_pool_mode() will now
notify userspace when the pool mode is changed.
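
A minimal sketch of the userspace notification, assuming the pool keeps
a back-pointer to its target's table (pool->ti->table and pool->pf.mode
are assumed names); the actual mode-switch bookkeeping is elided:

  static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
  {
  	enum pool_mode old_mode = pool->pf.mode;

  	/* ... install the mapping/queueing behaviour for new_mode ... */
  	pool->pf.mode = new_mode;

  	/* raise a dm event so userspace notices the mode change */
  	if (new_mode != old_mode)
  		dm_table_event(pool->ti->table);
  }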

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
1 parent 07f2b6e
cpuidle-ux500.c
/*
 * Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM)
 *
 * Based on the work of Rickard Andersson <rickard.andersson@stericsson.com>
 * and Jonas Aaberg <jonas.aberg@stericsson.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/smp.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/platform_data/arm-ux500-pm.h>
#include <linux/platform_device.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>

static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);

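/*
 * The last cpu to enter this function tries to become the "master": it
 * decouples the gic from the A9 cores, verifies that the other cpu
 * really is in WFI with no interrupt pending, and then asks the prcmu
 * to enter the ApIdle (retention) state.  Any failure along the way
 * falls back to a plain WFI.
 */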
static inline int ux500_enter_idle(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	int this_cpu = smp_processor_id();
	bool recouple = false;

	if (atomic_inc_return(&master) == num_online_cpus()) {

		/* With this lock held, we prevent the other cpu from exiting,
		 * re-entering this function and becoming the master */
		if (!spin_trylock(&master_lock))
			goto wfi;

		/* decouple the gic from the A9 cores */
		if (prcmu_gic_decouple()) {
			spin_unlock(&master_lock);
			goto out;
		}

		/* If an error occurs, we will have to recouple the gic
		 * manually */
		recouple = true;

		/* At this point, as the gic is decoupled, if the other
		 * cpu is in WFI we are guaranteed it won't be woken
		 * up, so we can safely go to retention */
		if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
			goto out;

		/* The prcmu will be in charge of watching the interrupts
		 * and waking up the cpus */
		if (prcmu_copy_gic_settings())
			goto out;

		/* Check that in the meantime an interrupt did
		 * not occur on the gic ... */
		if (prcmu_gic_pending_irq())
			goto out;

		/* ... and the prcmu */
		if (prcmu_pending_irq())
			goto out;

		/* Go to the retention state; the prcmu will wait for the
		 * cpu to enter WFI, which is what happens right after
		 * exiting this 'master' critical section */
		if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true))
			goto out;

		/* When we switch to retention, the prcmu is in charge
		 * of recoupling the gic automatically */
		recouple = false;

		spin_unlock(&master_lock);
	}
wfi:
	cpu_do_idle();
out:
	atomic_dec(&master);

	if (recouple) {
		prcmu_gic_recouple();
		spin_unlock(&master_lock);
	}

	return index;
}

static struct cpuidle_driver ux500_idle_driver = {
	.name = "ux500_idle",
	.owner = THIS_MODULE,
	.states = {
		ARM_CPUIDLE_WFI_STATE,
		{
			.enter		  = ux500_enter_idle,
			.exit_latency	  = 70,
			.target_residency = 260,
			.flags		  = CPUIDLE_FLAG_TIME_VALID |
			                    CPUIDLE_FLAG_TIMER_STOP,
			.name		  = "ApIdle",
			.desc		  = "ARM Retention",
		},
	},
	.safe_state_index = 0,
	.state_count = 2,
};

static int dbx500_cpuidle_probe(struct platform_device *pdev)
{
	/* Configure wake up reasons */
	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
			     PRCMU_WAKEUP(ABB));

	return cpuidle_register(&ux500_idle_driver, NULL);
}

static struct platform_driver dbx500_cpuidle_plat_driver = {
	.driver = {
		.name = "cpuidle-dbx500",
		.owner = THIS_MODULE,
	},
	.probe = dbx500_cpuidle_probe,
};

module_platform_driver(dbx500_cpuidle_plat_driver);