Revision 6e474083f3daf3a3546737f5d7d502ad12eb257c authored by Wei Xu on 01 December 2017, 10:10:36 UTC, committed by David S. Miller on 03 December 2017, 02:31:03 UTC
Matthew found a roughly 40% TCP throughput regression with commit
c67df11f ("vhost_net: try batch dequing from skb array"), as discussed
in the following thread:
https://www.mail-archive.com/netdev@vger.kernel.org/msg187936.html

Eventually we figured out that it was an skb leak in handle_rx()
when sending packets to the VM. This usually happens when the guest
cannot drain the vq as fast as vhost fills it; once the queue backs
up, vhost gets no headcount for the vq, and the skb(s) it has already
consumed from the batched rx array are leaked.

This can be avoided by making sure we have enough headcount before
actually consuming an skb from the batched rx array; this is done
simply by moving the zero-headcount check a bit earlier.
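
Roughly, the reordering looks like this (an illustrative sketch only,
with names loosely modeled on drivers/vhost/net.c, not the literal
diff):

	/* before: the skb is consumed before we know it can be posted */
	skb = vhost_net_buf_consume(&nvq->rxq);
	headcount = get_rx_bufs(vq, ...);
	if (!headcount)
		break;	/* skb already consumed but never freed: leaked */

	/* after: bail out on zero headcount before consuming the skb */
	headcount = get_rx_bufs(vq, ...);
	if (!headcount)
		break;	/* nothing consumed yet, so nothing can leak */
	skb = vhost_net_buf_consume(&nvq->rxq);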

Signed-off-by: Wei Xu <wexu@redhat.com>
Reported-by: Matthew Rosato <mjrosato@linux.vnet.ibm.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
spear13xx_pcie_gadget.c
/*
 * drivers/misc/spear13xx_pcie_gadget.c
 *
 * Copyright (C) 2010 ST Microelectronics
 * Pratyush Anand <pratyush.anand@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pci_regs.h>
#include <linux/configfs.h>
#include <mach/pcie.h>
#include <mach/misc_regs.h>

#define IN0_MEM_SIZE	(200 * 1024 * 1024 - 1)
/*
 * In the current implementation, address translation is done using IN0
 * only, so the IN1 start address and the IN0 end address are kept the
 * same.
 */
#define IN1_MEM_SIZE	(0 * 1024 * 1024 - 1)
#define IN_IO_SIZE	(20 * 1024 * 1024 - 1)
#define IN_CFG0_SIZE	(12 * 1024 * 1024 - 1)
#define IN_CFG1_SIZE	(12 * 1024 * 1024 - 1)
#define IN_MSG_SIZE	(12 * 1024 * 1024 - 1)
/* Keep default BAR size as 4K */
/* AORAM would be mapped by default */
#define INBOUND_ADDR_MASK	(SPEAR13XX_SYSRAM1_SIZE - 1)

#define INT_TYPE_NO_INT	0
#define INT_TYPE_INTX	1
#define INT_TYPE_MSI	2
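/*
 * Per-gadget state. Note that @base holds the *physical* base of the
 * PCIe region (stashed from the second platform resource in probe) and
 * is only ever programmed into the outbound translation registers;
 * despite the __iomem annotation it is not a usable CPU mapping.
 */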
struct spear_pcie_gadget_config {
	void __iomem *base;
	void __iomem *va_app_base;
	void __iomem *va_dbi_base;
	char int_type[10];
	ulong requested_msi;
	ulong configured_msi;
	ulong bar0_size;
	ulong bar0_rw_offset;
	void __iomem *va_bar0_address;
};

struct pcie_gadget_target {
	struct configfs_subsystem subsys;
	struct spear_pcie_gadget_config config;
};

struct pcie_gadget_target_attr {
	struct configfs_attribute	attr;
	ssize_t		(*show)(struct spear_pcie_gadget_config *config,
						char *buf);
	ssize_t		(*store)(struct spear_pcie_gadget_config *config,
						 const char *buf,
						 size_t count);
};

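/*
 * The AXI_OP_DBI_ACCESS_ID bits in slv_armisc/slv_awmisc steer AXI
 * reads/writes into the controller's DBI port, i.e. into its own
 * configuration space, for the duration of a local config access.
 */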
static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
	/* Enable DBI access */
	writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
			&app_reg->slv_armisc);
	writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
			&app_reg->slv_awmisc);
}

static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
	/* Disable DBI access */
	writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
			&app_reg->slv_armisc);
	writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
			&app_reg->slv_awmisc);
}

static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
		int where, int size, u32 *val)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong va_address;

	/* Enable DBI access */
	enable_dbi_access(app_reg);

	va_address = (ulong)config->va_dbi_base + (where & ~0x3);

	*val = readl(va_address);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	/* Disable DBI access */
	disable_dbi_access(app_reg);
}

static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
		int where, int size, u32 val)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong va_address;

	/* Enable DBI access */
	enable_dbi_access(app_reg);

	va_address = (ulong)config->va_dbi_base + (where & ~0x3);

	if (size == 4)
		writel(val, va_address);
	else if (size == 2)
		writew(val, va_address + (where & 2));
	else if (size == 1)
		writeb(val, va_address + (where & 3));

	/* Disable DBI access */
	disable_dbi_access(app_reg);
}

#define PCI_FIND_CAP_TTL	48
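/* Bound the capability-list walk so a malformed or looping list terminates. */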

static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
		u32 pos, int cap, int *ttl)
{
	u32 id;

	while ((*ttl)--) {
		spear_dbi_read_reg(config, pos, 1, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
			u32 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
}

static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
				u8 hdr_type)
{
	u32 status;

	spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}
}

/*
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it. Possible values for @cap:
 *
 * %PCI_CAP_ID_PM	Power Management
 * %PCI_CAP_ID_AGP	Accelerated Graphics Port
 * %PCI_CAP_ID_VPD	Vital Product Data
 * %PCI_CAP_ID_SLOTID	Slot Identification
 * %PCI_CAP_ID_MSI	Message Signalled Interrupts
 * %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 * %PCI_CAP_ID_PCIX	PCI-X
 * %PCI_CAP_ID_EXP	PCI Express
 */
static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
		int cap)
{
	u32 pos;
	u32 hdr_type;

	spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);

	pos = pci_find_own_cap_start(config, hdr_type);
	if (pos)
		pos = pci_find_own_next_cap(config, pos, cap);

	return pos;
}

static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
{
	return 0;
}

/*
 * configfs interfaces show/store functions
 */
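
/*
 * Illustrative shell usage (an assumption for illustration: configfs
 * mounted at /sys/kernel/config and this device probed as pcie_gadget.1):
 *
 *	mount -t configfs none /sys/kernel/config
 *	echo UP > /sys/kernel/config/pcie_gadget.1/link
 *	cat /sys/kernel/config/pcie_gadget.1/link
 *	echo 0x100000 > /sys/kernel/config/pcie_gadget.1/bar0_size
 */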

static struct spear_pcie_gadget_config *to_target(struct config_item *item)
{
	return item ?
		&container_of(to_configfs_subsystem(to_config_group(item)),
				struct pcie_gadget_target, subsys)->config :
		NULL;
}

static ssize_t pcie_gadget_link_show(struct config_item *item, char *buf)
{
	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;

	if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
		return sprintf(buf, "UP");
	else
		return sprintf(buf, "DOWN");
}

static ssize_t pcie_gadget_link_store(struct config_item *item,
		const char *buf, size_t count)
{
	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;

	if (sysfs_streq(buf, "UP"))
		writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
			&app_reg->app_ctrl_0);
	else if (sysfs_streq(buf, "DOWN"))
		writel(readl(&app_reg->app_ctrl_0)
				& ~(1 << APP_LTSSM_ENABLE_ID),
				&app_reg->app_ctrl_0);
	else
		return -EINVAL;
	return count;
}

static ssize_t pcie_gadget_int_type_show(struct config_item *item, char *buf)
{
	return sprintf(buf, "%s", to_target(item)->int_type);
}

static ssize_t pcie_gadget_int_type_store(struct config_item *item,
		const char *buf, size_t count)
{
	struct spear_pcie_gadget_config *config = to_target(item);
	u32 cap, vec, flags;
	ulong vector;

	if (sysfs_streq(buf, "INTA"))
		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);

	else if (sysfs_streq(buf, "MSI")) {
		vector = config->requested_msi;
		vec = 0;
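		/* vec = floor(log2(requested_msi)); MSI vector counts are powers of two */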
		while (vector > 1) {
			vector /= 2;
			vec++;
		}
		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
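		/* Multiple Message Capable field: bits 3:1 of the MSI flags (PCI_MSI_FLAGS_QMASK) */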
		flags &= ~PCI_MSI_FLAGS_QMASK;
		flags |= vec << 1;
		spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
	} else
		return -EINVAL;

	strcpy(config->int_type, buf);

	return count;
}

static ssize_t pcie_gadget_no_of_msi_show(struct config_item *item, char *buf)
{
	struct spear_pcie_gadget_config *config = to_target(item);
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	u32 cap, vec, flags;
	ulong vector;

	if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
			!= (1 << CFG_MSI_EN_ID))
		vector = 0;
	else {
		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
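		/* Multiple Message Enable field: bits 6:4 of the MSI flags (PCI_MSI_FLAGS_QSIZE) */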
		flags &= PCI_MSI_FLAGS_QSIZE;
		vec = flags >> 4;
		vector = 1;
		while (vec--)
			vector *= 2;
	}
	config->configured_msi = vector;

	return sprintf(buf, "%lu", vector);
}

static ssize_t pcie_gadget_no_of_msi_store(struct config_item *item,
		const char *buf, size_t count)
{
	struct spear_pcie_gadget_config *config = to_target(item);
	int ret;

	ret = kstrtoul(buf, 0, &config->requested_msi);
	if (ret)
		return ret;

	if (config->requested_msi > 32)
		config->requested_msi = 32;

	return count;
}

static ssize_t pcie_gadget_inta_store(struct config_item *item,
		const char *buf, size_t count)
{
	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
	ulong en;
	int ret;

	ret = kstrtoul(buf, 0, &en);
	if (ret)
		return ret;

	if (en)
		writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
				&app_reg->app_ctrl_0);
	else
		writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
				&app_reg->app_ctrl_0);

	return count;
}

static ssize_t pcie_gadget_send_msi_store(struct config_item *item,
		const char *buf, size_t count)
{
	struct spear_pcie_gadget_config *config = to_target(item);
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong vector;
	u32 ven_msi;
	int ret;

	ret = kstrtoul(buf, 0, &vector);
	if (ret)
		return ret;

	if (!config->configured_msi)
		return -EINVAL;

	if (vector >= config->configured_msi)
		return -EINVAL;

	ven_msi = readl(&app_reg->ven_msi_1);
	ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
	ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
	ven_msi &= ~VEN_MSI_TC_MASK;
	ven_msi |= 0 << VEN_MSI_TC_ID;
	ven_msi &= ~VEN_MSI_VECTOR_MASK;
	ven_msi |= vector << VEN_MSI_VECTOR_ID;

	/* generating interrupt for msi vector */
	ven_msi |= VEN_MSI_REQ_EN;
	writel(ven_msi, &app_reg->ven_msi_1);
	udelay(1);
	ven_msi &= ~VEN_MSI_REQ_EN;
	writel(ven_msi, &app_reg->ven_msi_1);

	return count;
}

static ssize_t pcie_gadget_vendor_id_show(struct config_item *item, char *buf)
{
	u32 id;

	spear_dbi_read_reg(to_target(item), PCI_VENDOR_ID, 2, &id);

	return sprintf(buf, "%x", id);
}

static ssize_t pcie_gadget_vendor_id_store(struct config_item *item,
		const char *buf, size_t count)
{
	ulong id;
	int ret;

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		return ret;

	spear_dbi_write_reg(to_target(item), PCI_VENDOR_ID, 2, id);

	return count;
}

static ssize_t pcie_gadget_device_id_show(struct config_item *item, char *buf)
{
	u32 id;

	spear_dbi_read_reg(to_target(item), PCI_DEVICE_ID, 2, &id);

	return sprintf(buf, "%x", id);
}

static ssize_t pcie_gadget_device_id_store(struct config_item *item,
		const char *buf, size_t count)
{
	ulong id;
	int ret;

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		return ret;

	spear_dbi_write_reg(to_target(item), PCI_DEVICE_ID, 2, id);

	return count;
}

static ssize_t pcie_gadget_bar0_size_show(struct config_item *item, char *buf)
{
	return sprintf(buf, "%lx", to_target(item)->bar0_size);
}

static ssize_t pcie_gadget_bar0_size_store(struct config_item *item,
		const char *buf, size_t count)
{
	struct spear_pcie_gadget_config *config = to_target(item);
	ulong size;
	u32 pos, pos1;
	u32 no_of_bit = 0;
	int ret;

	ret = kstrtoul(buf, 0, &size);
	if (ret)
		return ret;

	/* min bar size is 256 */
	if (size <= 0x100)
		size = 0x100;
	/* max bar size is 1MB*/
	else if (size >= 0x100000)
		size = 0x100000;
	else {
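		/*
		 * Round up to the next power of two; an exact power of two is
		 * left unchanged (no_of_bit == 2 means a single bit was set,
		 * as the final failed find_next_bit probe is also counted).
		 */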
		pos = 0;
		pos1 = 0;
		while (pos < 21) {
			pos = find_next_bit((ulong *)&size, 21, pos);
			if (pos != 21)
				pos1 = pos + 1;
			pos++;
			no_of_bit++;
		}
		if (no_of_bit == 2)
			pos1--;

		size = 1 << pos1;
	}
	config->bar0_size = size;
	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);

	return count;
}

static ssize_t pcie_gadget_bar0_address_show(struct config_item *item,
		char *buf)
{
	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
	u32 address = readl(&app_reg->pim0_mem_addr_start);

	return sprintf(buf, "%x", address);
}

static ssize_t pcie_gadget_bar0_address_store(struct config_item *item,
		const char *buf, size_t count)
{
	struct spear_pcie_gadget_config *config = to_target(item);
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong address;
	int ret;

	ret = kstrtoul(buf, 0, &address);
	if (ret)
		return ret;

	address &= ~(config->bar0_size - 1);
	if (config->va_bar0_address)
		iounmap(config->va_bar0_address);
	config->va_bar0_address = ioremap(address, config->bar0_size);
	if (!config->va_bar0_address)
		return -ENOMEM;

	writel(address, &app_reg->pim0_mem_addr_start);

	return count;
}

static ssize_t pcie_gadget_bar0_rw_offset_show(struct config_item *item,
		char *buf)
{
	return sprintf(buf, "%lx", to_target(item)->bar0_rw_offset);
}

static ssize_t pcie_gadget_bar0_rw_offset_store(struct config_item *item,
		const char *buf, size_t count)
{
	ulong offset;
	int ret;

	ret = kstrtoul(buf, 0, &offset);
	if (ret)
		return ret;

	if (offset % 4)
		return -EINVAL;

	to_target(item)->bar0_rw_offset = offset;

	return count;
}

static ssize_t pcie_gadget_bar0_data_show(struct config_item *item, char *buf)
{
	struct spear_pcie_gadget_config *config = to_target(item);
	ulong data;

	if (!config->va_bar0_address)
		return -ENOMEM;

	data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);

	return sprintf(buf, "%lx", data);
}

static ssize_t pcie_gadget_bar0_data_store(struct config_item *item,
		const char *buf, size_t count)
{
	struct spear_pcie_gadget_config *config = to_target(item);
	ulong data;
	int ret;

	ret = kstrtoul(buf, 0, &data);
	if (ret)
		return ret;

	if (!config->va_bar0_address)
		return -ENOMEM;

	writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);

	return count;
}

CONFIGFS_ATTR(pcie_gadget_, link);
CONFIGFS_ATTR(pcie_gadget_, int_type);
CONFIGFS_ATTR(pcie_gadget_, no_of_msi);
CONFIGFS_ATTR_WO(pcie_gadget_, inta);
CONFIGFS_ATTR_WO(pcie_gadget_, send_msi);
CONFIGFS_ATTR(pcie_gadget_, vendor_id);
CONFIGFS_ATTR(pcie_gadget_, device_id);
CONFIGFS_ATTR(pcie_gadget_, bar0_size);
CONFIGFS_ATTR(pcie_gadget_, bar0_address);
CONFIGFS_ATTR(pcie_gadget_, bar0_rw_offset);
CONFIGFS_ATTR(pcie_gadget_, bar0_data);

static struct configfs_attribute *pcie_gadget_target_attrs[] = {
	&pcie_gadget_attr_link,
	&pcie_gadget_attr_int_type,
	&pcie_gadget_attr_no_of_msi,
	&pcie_gadget_attr_inta,
	&pcie_gadget_attr_send_msi,
	&pcie_gadget_attr_vendor_id,
	&pcie_gadget_attr_device_id,
	&pcie_gadget_attr_bar0_size,
	&pcie_gadget_attr_bar0_address,
	&pcie_gadget_attr_bar0_rw_offset,
	&pcie_gadget_attr_bar0_data,
	NULL,
};

static struct config_item_type pcie_gadget_target_type = {
	.ct_attrs		= pcie_gadget_target_attrs,
	.ct_owner		= THIS_MODULE,
};

static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;

	/* setup registers for outbound translation */

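	/*
	 * The outbound windows are laid out back to back: IN0 mem, IN1 mem,
	 * I/O, CFG0, CFG1 and MSG. Each limit register is start + SIZE and
	 * the next window starts at the previous limit + 1.
	 */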
	writel(config->base, &app_reg->in0_mem_addr_start);
	writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
			&app_reg->in0_mem_addr_limit);
	writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
	writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
			&app_reg->in1_mem_addr_limit);
	writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
	writel(app_reg->in_io_addr_start + IN_IO_SIZE,
			&app_reg->in_io_addr_limit);
	writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
	writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
			&app_reg->in_cfg0_addr_limit);
	writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
	writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
			&app_reg->in_cfg1_addr_limit);
	writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
	writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
			&app_reg->in_msg_addr_limit);

	writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
	writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
	writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);

	/* setup registers for inbound translation */

	/* Keep AORAM mapped at BAR0 as default */
	config->bar0_size = INBOUND_ADDR_MASK + 1;
	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
	spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
	config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
			config->bar0_size);

	writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
	writel(0, &app_reg->pim1_mem_addr_start);
	writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);

	writel(0x0, &app_reg->pim_io_addr_start);
	writel(0x0, &app_reg->pim_io_addr_start);
	writel(0x0, &app_reg->pim_rom_addr_start);

	writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
			| ((u32)1 << REG_TRANSLATION_ENABLE),
			&app_reg->app_ctrl_0);
	/* disable all rx interrupts */
	writel(0, &app_reg->int_mask);

	/* Select INTA as default */
	spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
}

static int spear_pcie_gadget_probe(struct platform_device *pdev)
{
	struct resource *res0, *res1;
	int status = 0;
	int irq;
	struct clk *clk;
	struct pcie_gadget_target *target;
	struct spear_pcie_gadget_config *config;
	struct config_item		*cg_item;
	struct configfs_subsystem *subsys;

	target = devm_kzalloc(&pdev->dev, sizeof(*target), GFP_KERNEL);
	if (!target) {
		dev_err(&pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	cg_item = &target->subsys.su_group.cg_item;
	sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
	cg_item->ci_type	= &pcie_gadget_target_type;
	config = &target->config;

	/* get resource for application registers */
	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	config->va_app_base = devm_ioremap_resource(&pdev->dev, res0);
	if (IS_ERR(config->va_app_base)) {
		dev_err(&pdev->dev, "ioremap fail\n");
		return PTR_ERR(config->va_app_base);
	}

	/* get resource for dbi registers */
	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	config->base = (void __iomem *)res1->start;

	config->va_dbi_base = devm_ioremap_resource(&pdev->dev, res1);
	if (IS_ERR(config->va_dbi_base)) {
		dev_err(&pdev->dev, "ioremap fail\n");
		return PTR_ERR(config->va_dbi_base);
	}

	platform_set_drvdata(pdev, target);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no update irq?\n");
		return irq;
	}

	status = devm_request_irq(&pdev->dev, irq, spear_pcie_gadget_irq,
				  0, pdev->name, NULL);
	if (status) {
		dev_err(&pdev->dev,
			"pcie gadget interrupt IRQ%d already claimed\n", irq);
		return status;
	}

	/* Register configfs hooks */
	subsys = &target->subsys;
	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);
	status = configfs_register_subsystem(subsys);
	if (status)
		return status;

	/*
	 * Init basic pcie application registers.
	 * Do not enable the clock if this is PCIE0. Ideally, every
	 * controller would be independent of the others with respect to
	 * clocks, but PCIE1 and PCIE2 depend on PCIE0, so the PCIE0 clock
	 * is enabled during board init.
	 */
	if (pdev->id == 1) {
		/*
		 * Ideally the CFG clock would also be enabled here, but it
		 * is currently done during the board init routine.
		 */
		clk = clk_get_sys("pcie1", NULL);
		if (IS_ERR(clk)) {
			pr_err("%s:couldn't get clk for pcie1\n", __func__);
			return PTR_ERR(clk);
		}
		status = clk_enable(clk);
		if (status) {
			pr_err("%s:couldn't enable clk for pcie1\n", __func__);
			return status;
		}
	} else if (pdev->id == 2) {
		/*
		 * Ideally the CFG clock would also be enabled here, but it
		 * is currently done during the board init routine.
		 */
		clk = clk_get_sys("pcie2", NULL);
		if (IS_ERR(clk)) {
			pr_err("%s:couldn't get clk for pcie2\n", __func__);
			return PTR_ERR(clk);
		}
		status = clk_enable(clk);
		if (status) {
			pr_err("%s:couldn't enable clk for pcie2\n", __func__);
			return status;
		}
	}
	spear13xx_pcie_device_init(config);

	return 0;
}

static int spear_pcie_gadget_remove(struct platform_device *pdev)
{
	struct pcie_gadget_target *target;

	target = platform_get_drvdata(pdev);

	configfs_unregister_subsystem(&target->subsys);

	return 0;
}

static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
{
}

static struct platform_driver spear_pcie_gadget_driver = {
	.probe = spear_pcie_gadget_probe,
	.remove = spear_pcie_gadget_remove,
	.shutdown = spear_pcie_gadget_shutdown,
	.driver = {
		.name = "pcie-gadget-spear",
		.bus = &platform_bus_type
	},
};

module_platform_driver(spear_pcie_gadget_driver);

MODULE_ALIAS("platform:pcie-gadget-spear");
MODULE_AUTHOR("Pratyush Anand");
MODULE_LICENSE("GPL");