https://github.com/torvalds/linux
Revision 275e88b06a277ccf89d9c471a777e9b4f8c552b0 authored by Rob Herring on 18 December 2020, 14:39:05 UTC, committed by Bjorn Helgaas on 26 December 2020, 03:58:36 UTC

PCI: tegra: Fix host link initialization

Commit b9ac0f9dc8ea ("PCI: dwc: Move dw_pcie_setup_rc() to DWC common
code") broke enumeration of downstream devices on Tegra:

In non-working case (next-20201211):

  0001:00:00.0 PCI bridge: NVIDIA Corporation Device 1ad2 (rev a1)
  0001:01:00.0 SATA controller: Marvell Technology Group Ltd. Device 9171 (rev 13)
  0005:00:00.0 PCI bridge: NVIDIA Corporation Device 1ad0 (rev a1)

In working case (v5.10-rc7):

  0001:00:00.0 PCI bridge: NVIDIA Corporation Device 1ad2 (rev a1)
  0001:01:00.0 SATA controller: Marvell Technology Group Ltd. Device 9171 (rev 13)
  0005:00:00.0 PCI bridge: NVIDIA Corporation Device 1ad0 (rev a1)
  0005:01:00.0 PCI bridge: PLX Technology, Inc. Device 3380 (rev ab)
  0005:02:02.0 PCI bridge: PLX Technology, Inc. Device 3380 (rev ab)
  0005:03:00.0 USB controller: PLX Technology, Inc. Device 3380 (rev ab)

The problem seems to be that dw_pcie_setup_rc() is now called twice, before and
after the link up handling. The fix is to move Tegra's link up handling to the
.start_link() function, like other DWC drivers. Tegra is a bit more
complicated than others as it re-inits the whole DWC controller to retry
the link. With this, the initialization ordering is restored to match the
prior sequence.
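
As a rough illustration (not the actual Tegra code; the my_pcie_* names are
made up), the shape of this fix in a DWC-based host driver is to hang the
controller-specific link bring-up/retry logic off the .start_link() callback
in struct dw_pcie_ops, so the common host-init path can run dw_pcie_setup_rc()
once and then ask the driver to start the link:

  /*
   * Hypothetical DWC host driver fragment. The common host code performs
   * dw_pcie_setup_rc() and then invokes .start_link(), so link training and
   * retry logic lives here rather than in the host_init path.
   */
  static int my_pcie_start_link(struct dw_pcie *pci)
  {
          /* e.g. enable LTSSM, wait for link up, re-init and retry on failure */
          return 0;
  }

  static const struct dw_pcie_ops my_pcie_dw_ops = {
          .start_link = my_pcie_start_link,
  };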

Fixes: b9ac0f9dc8ea ("PCI: dwc: Move dw_pcie_setup_rc() to DWC common code")
Link: https://lore.kernel.org/r/20201218143905.1614098-1-robh@kernel.org
Reported-by: Mian Yousaf Kaukab <ykaukab@suse.de>
Tested-by: Mian Yousaf Kaukab <ykaukab@suse.de>
Signed-off-by: Rob Herring <robh@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: Jonathan Hunter <jonathanh@nvidia.com>
Cc: Vidya Sagar <vidyas@nvidia.com>
1 parent 255b2d5

timer-pistachio.c
/*
 * Pistachio clocksource based on general-purpose timers
 *
 * Copyright (C) 2015 Imagination Technologies
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched_clock.h>
#include <linux/time.h>

/* Top level reg */
#define CR_TIMER_CTRL_CFG		0x00
#define TIMER_ME_GLOBAL			BIT(0)
#define CR_TIMER_REV			0x10

/* Timer specific registers */
#define TIMER_CFG			0x20
#define TIMER_ME_LOCAL			BIT(0)
#define TIMER_RELOAD_VALUE		0x24
#define TIMER_CURRENT_VALUE		0x28
#define TIMER_CURRENT_OVERFLOW_VALUE	0x2C
#define TIMER_IRQ_STATUS		0x30
#define TIMER_IRQ_CLEAR			0x34
#define TIMER_IRQ_MASK			0x38

#define PERIP_TIMER_CONTROL		0x90

/* Timer specific configuration Values */
#define RELOAD_VALUE			0xffffffff

struct pistachio_clocksource {
	void __iomem *base;
	raw_spinlock_t lock;
	struct clocksource cs;
};

static struct pistachio_clocksource pcs_gpt;

#define to_pistachio_clocksource(cs)	\
	container_of(cs, struct pistachio_clocksource, cs)

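/*
 * Each general-purpose timer owns a 0x20-byte block of the per-timer
 * registers above (TIMER_CFG .. TIMER_IRQ_MASK), so the accessors below
 * offset by 0x20 * gpt_id to reach a given timer instance.
 */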
static inline u32 gpt_readl(void __iomem *base, u32 offset, u32 gpt_id)
{
	return readl(base + 0x20 * gpt_id + offset);
}

static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
		u32 gpt_id)
{
	writel(value, base + 0x20 * gpt_id + offset);
}

static u64 notrace
pistachio_clocksource_read_cycles(struct clocksource *cs)
{
	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
	u32 counter, overflw;
	unsigned long flags;

	/*
	 * The counter value is only refreshed after the overflow value is read.
	 * And they must be read in strict order, hence raw spin lock added.
	 */

	raw_spin_lock_irqsave(&pcs->lock, flags);
	overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
	counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
	raw_spin_unlock_irqrestore(&pcs->lock, flags);

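	/*
	 * The timer counts down from RELOAD_VALUE, so invert the raw value to
	 * give the clocksource core a monotonically increasing cycle count.
	 */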
	return (u64)~counter;
}

static u64 notrace pistachio_read_sched_clock(void)
{
	return pistachio_clocksource_read_cycles(&pcs_gpt.cs);
}

static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx,
			int enable)
{
	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
	u32 val;

	val = gpt_readl(pcs->base, TIMER_CFG, timeridx);
	if (enable)
		val |= TIMER_ME_LOCAL;
	else
		val &= ~TIMER_ME_LOCAL;

	gpt_writel(pcs->base, val, TIMER_CFG, timeridx);
}

static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx)
{
	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);

	/* Disable GPT local before loading reload value */
	pistachio_clksrc_set_mode(cs, timeridx, false);
	gpt_writel(pcs->base, RELOAD_VALUE, TIMER_RELOAD_VALUE, timeridx);
	pistachio_clksrc_set_mode(cs, timeridx, true);
}

static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx)
{
	/* Disable GPT local */
	pistachio_clksrc_set_mode(cs, timeridx, false);
}

static int pistachio_clocksource_enable(struct clocksource *cs)
{
	pistachio_clksrc_enable(cs, 0);
	return 0;
}

static void pistachio_clocksource_disable(struct clocksource *cs)
{
	pistachio_clksrc_disable(cs, 0);
}

/* Desirable clock source for pistachio platform */
static struct pistachio_clocksource pcs_gpt = {
	.cs =	{
		.name		= "gptimer",
		.rating		= 300,
		.enable		= pistachio_clocksource_enable,
		.disable	= pistachio_clocksource_disable,
		.read		= pistachio_clocksource_read_cycles,
		.mask		= CLOCKSOURCE_MASK(32),
		.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_SUSPEND_NONSTOP,
		},
};

static int __init pistachio_clksrc_of_init(struct device_node *node)
{
	struct clk *sys_clk, *fast_clk;
	struct regmap *periph_regs;
	unsigned long rate;
	int ret;

	pcs_gpt.base = of_iomap(node, 0);
	if (!pcs_gpt.base) {
		pr_err("cannot iomap\n");
		return -ENXIO;
	}

	periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
	if (IS_ERR(periph_regs)) {
		pr_err("cannot get peripheral regmap (%ld)\n",
		       PTR_ERR(periph_regs));
		return PTR_ERR(periph_regs);
	}

	/* Switch to using the fast counter clock */
	ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
				 0xf, 0x0);
	if (ret)
		return ret;

	sys_clk = of_clk_get_by_name(node, "sys");
	if (IS_ERR(sys_clk)) {
		pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
		return PTR_ERR(sys_clk);
	}

	fast_clk = of_clk_get_by_name(node, "fast");
	if (IS_ERR(fast_clk)) {
		pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
		return PTR_ERR(fast_clk);
	}

	ret = clk_prepare_enable(sys_clk);
	if (ret < 0) {
		pr_err("failed to enable clock (%d)\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(fast_clk);
	if (ret < 0) {
		pr_err("failed to enable clock (%d)\n", ret);
		clk_disable_unprepare(sys_clk);
		return ret;
	}

	rate = clk_get_rate(fast_clk);

	/* Disable IRQs for clocksource usage */
	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);

	/* Enable timer block */
	writel(TIMER_ME_GLOBAL, pcs_gpt.base);

	raw_spin_lock_init(&pcs_gpt.lock);
	sched_clock_register(pistachio_read_sched_clock, 32, rate);
	return clocksource_register_hz(&pcs_gpt.cs, rate);
}
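
/*
 * Register the init routine against the "img,pistachio-gptimer" compatible
 * so timer_probe() calls it during early boot.
 */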
TIMER_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
		       pistachio_clksrc_of_init);