Revision 3e1a0699095803e53072699a4a1485af7744601d authored by Joe Thornber on 03 March 2014, 16:03:26 UTC, committed by Mike Snitzer on 05 March 2014, 20:26:58 UTC
Ideally a thin pool would never run out of data space; the low water
mark would trigger userland to extend the pool before we completely run
out of space.  However, many small random IOs to unprovisioned space can
consume data space at an alarming rate.  Adjust your low water mark if
you're frequently seeing "out-of-data-space" mode.

Before this fix, if data space ran out the pool would be put in
PM_READ_ONLY mode which also aborted the pool's current metadata
transaction (data loss for any changes in the transaction).  This had a
side-effect of needlessly compromising data consistency.  And retry of
queued unserviceable bios, once the data pool was resized, could
initiate changes to potentially inconsistent pool metadata.

Now when the pool's data space is exhausted transition to a new pool
mode (PM_OUT_OF_DATA_SPACE) that allows metadata to be changed but data
may not be allocated.  This allows users to remove thin volumes or
discard data to recover data space.

The pool is no longer put in PM_READ_ONLY mode in response to the pool
running out of data space.  And PM_READ_ONLY mode no longer aborts the
pool's current metadata transaction.  Also, set_pool_mode() will now
notify userspace when the pool mode is changed.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
1 parent 07f2b6e
Raw File
csio_defs.h
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CSIO_DEFS_H__
#define __CSIO_DEFS_H__

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/pci.h>
#include <linux/jiffies.h>

#define CSIO_INVALID_IDX		0xFFFFFFFF
/* Bump/decrement a named counter in an object's embedded stats struct. */
#define CSIO_INC_STATS(elem, val)	((elem)->stats.val++)
#define CSIO_DEC_STATS(elem, val)	((elem)->stats.val--)
/*
 * A WWN is valid if its NAA nibble (top 4 bits of the first byte) is 5.
 * The argument is fully parenthesized so pointer expressions such as
 * (buf + 1) are dereferenced correctly; the result of == is already
 * boolean, so no ternary is needed.
 */
#define CSIO_VALID_WWN(wwn)		((*(wwn) >> 4) == 0x5)
#define CSIO_DID_MASK			0xFFFFFF
#define CSIO_WORD_TO_BYTE		4

#ifndef readq
/*
 * Fallback 64-bit MMIO accessors for platforms that do not provide a
 * native readq/writeq.
 *
 * NOTE: each 64-bit access is split into two 32-bit accesses (low word
 * at @addr first, then the high word at @addr + 4), so the access is
 * not atomic with respect to the device.
 */
static inline u64 readq(void __iomem *addr)
{
	/* OR the halves together; '|' (not '+') expresses bit composition. */
	return readl(addr) | ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, void __iomem *addr)
{
	writel(val, addr);		/* low 32 bits */
	writel(val >> 32, addr + 4);	/* high 32 bits */
}
#endif

static inline int
csio_list_deleted(struct list_head *list)
{
	/* A deleted/empty node has both links pointing back at itself. */
	if (list->next != list)
		return 0;

	return list->prev == list;
}

/* Raw next/prev link accessors; @elem must point at a struct list_head. */
#define csio_list_next(elem)	(((struct list_head *)(elem))->next)
#define csio_list_prev(elem)	(((struct list_head *)(elem))->prev)

/*
 * State machine.
 *
 * A state is a function taking the state-machine object and a 32-bit
 * event code; posting an event invokes the current state handler.
 * The helpers below cast their void * argument straight to
 * struct csio_sm *, which presumes this struct sits at the start of
 * the enclosing object — verify in derived structs.
 */
typedef void (*csio_sm_state_t)(void *, uint32_t);

struct csio_sm {
	struct list_head	sm_list;	/* linkage on owner's list */
	csio_sm_state_t		sm_state;	/* current state handler */
};

static inline void
csio_set_state(void *smp, void *state)
{
	/* Transition the state machine to @state (a csio_sm_state_t). */
	struct csio_sm *sm = smp;

	sm->sm_state = (csio_sm_state_t)state;
}

static inline void
csio_init_state(struct csio_sm *smp, void *state)
{
	csio_set_state(smp, state);
}

static inline void
csio_post_event(void *smp, uint32_t evt)
{
	/* Dispatch @evt by calling the current state handler. */
	struct csio_sm *sm = smp;

	sm->sm_state(smp, evt);
}

static inline csio_sm_state_t
csio_get_state(void *smp)
{
	/* Return the state machine's current state handler. */
	struct csio_sm *sm = smp;

	return sm->sm_state;
}

static inline bool
csio_match_state(void *smp, void *state)
{
	return (csio_get_state(smp) == (csio_sm_state_t)state);
}

/* Hard assertion: BUG()s the kernel if @cond is false. */
#define	CSIO_ASSERT(cond)		BUG_ON(!(cond))

#ifdef __CSIO_DEBUG__
#define CSIO_DB_ASSERT(__c)		CSIO_ASSERT((__c))
#else
/*
 * Expands to nothing, so @__c is NOT evaluated in non-debug builds;
 * never pass an expression with side effects.
 */
#define CSIO_DB_ASSERT(__c)
#endif

#endif /* ifndef __CSIO_DEFS_H__ */
back to top