Revision a81bfdf8bf5396824d7d139560180854cb599b06 authored by Linus Torvalds on 12 February 2021, 19:29:06 UTC, committed by Linus Torvalds on 12 February 2021, 19:29:06 UTC
Pull drm fixes from Dave Airlie:
 "Regular fixes for final, there is a ttm regression fix, dp-mst fix,
  one amdgpu revert, two i915 fixes, and some misc fixes for sun4i,
  xlnx, and vc4.

  All pretty quiet, and I don't think we have any known outstanding
  regressions.

  ttm:
   - page pool regression fix.

  dp_mst:
   - don't report un-attached ports as connected

  amdgpu:
   - blank screen fix

  i915:
   - ensure Type-C FIA is powered when initializing
   - fix overlay frontbuffer tracking

  sun4i:
   - tcon1 sync polarity fix
   - always set HDMI clock rate
   - fix H6 HDMI PHY config
   - fix H6 max frequency

  vc4:
   - fix buffer overflow

  xlnx:
   - fix memory leak"

* tag 'drm-fixes-2021-02-12' of git://anongit.freedesktop.org/drm/drm:
  drm/ttm: make sure pool pages are cleared
  drm/sun4i: dw-hdmi: Fix max. frequency for H6
  drm/sun4i: Fix H6 HDMI PHY configuration
  drm/sun4i: dw-hdmi: always set clock rate
  drm/sun4i: tcon: set sync polarity for tcon1 channel
  drm/i915: Fix overlay frontbuffer tracking
  Revert "drm/amd/display: Update NV1x SR latency values"
  drm/i915/tgl+: Make sure TypeC FIA is powered up when initializing it
  drm/dp_mst: Don't report ports connected if nothing is attached to them
  drm/xlnx: fix kmemleak by sending vblank_event in atomic_disable
  drm/vc4: hvs: Fix buffer overflow with the dlist handling
2 parents e77a681 + 551c818
core.c
/*
 * Broadcom specific AMBA
 * Core ops
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>

/* Poll agent register @reg until (val & mask) == value, or @timeout jiffies pass */
static bool bcma_core_wait_value(struct bcma_device *core, u16 reg, u32 mask,
				 u32 value, int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_aread32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}

bool bcma_core_is_enabled(struct bcma_device *core)
{
	if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC))
	    != BCMA_IOCTL_CLK)
		return false;
	if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(bcma_core_is_enabled);

void bcma_core_disable(struct bcma_device *core, u32 flags)
{
	/* Nothing to do if the core is already held in reset */
	if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
		return;

	/* Wait until the reset status register clears (no reset in progress) */
	bcma_core_wait_value(core, BCMA_RESET_ST, ~0, 0, 300);

	/* Assert reset; the readback flushes the write before the delay */
	bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
	bcma_aread32(core, BCMA_RESET_CTL);
	udelay(1);

	bcma_awrite32(core, BCMA_IOCTL, flags);
	bcma_aread32(core, BCMA_IOCTL);
	udelay(10);
}
EXPORT_SYMBOL_GPL(bcma_core_disable);

int bcma_core_enable(struct bcma_device *core, u32 flags)
{
	bcma_core_disable(core, flags);

	/* Enable the clock, with gated clocks forced on, while still in reset */
	bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
	bcma_aread32(core, BCMA_IOCTL);

	/* Take the core out of reset */
	bcma_awrite32(core, BCMA_RESET_CTL, 0);
	bcma_aread32(core, BCMA_RESET_CTL);
	udelay(1);

	/* Stop forcing the gated clocks on; leave the clock and caller flags set */
	bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
	bcma_aread32(core, BCMA_IOCTL);
	udelay(1);

	return 0;
}
EXPORT_SYMBOL_GPL(bcma_core_enable);

void bcma_core_set_clockmode(struct bcma_device *core,
			     enum bcma_clkmode clkmode)
{
	u16 i;

	WARN_ON(core->id.id != BCMA_CORE_CHIPCOMMON &&
		core->id.id != BCMA_CORE_PCIE &&
		core->id.id != BCMA_CORE_80211);

	switch (clkmode) {
	case BCMA_CLKMODE_FAST:
		bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
		usleep_range(64, 300);
		for (i = 0; i < 1500; i++) {
			if (bcma_read32(core, BCMA_CLKCTLST) &
			    BCMA_CLKCTLST_HAVEHT) {
				i = 0;	/* HT clock is up: signal success to the check below */
				break;
			}
			udelay(10);
		}
		if (i)
			bcma_err(core->bus, "HT force timeout\n");
		break;
	case BCMA_CLKMODE_DYNAMIC:
		bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
		break;
	}
}
EXPORT_SYMBOL_GPL(bcma_core_set_clockmode);

void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
{
	u16 i;

	WARN_ON(req & ~BCMA_CLKCTLST_EXTRESREQ);
	WARN_ON(status & ~BCMA_CLKCTLST_EXTRESST);

	if (on) {
		bcma_set32(core, BCMA_CLKCTLST, req);
		for (i = 0; i < 10000; i++) {
			if ((bcma_read32(core, BCMA_CLKCTLST) & status) ==
			    status) {
				i = 0;	/* requested status bits set: signal success to the check below */
				break;
			}
			udelay(10);
		}
		if (i)
			bcma_err(core->bus, "PLL enable timeout\n");
	} else {
		/*
		 * Mask the PLL but don't wait for it to be disabled. PLL may be
		 * shared between cores and will be still up if there is another
		 * core using it.
		 */
		bcma_mask32(core, BCMA_CLKCTLST, ~req);
		bcma_read32(core, BCMA_CLKCTLST);
	}
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);

u32 bcma_core_dma_translation(struct bcma_device *core)
{
	switch (core->bus->hosttype) {
	case BCMA_HOSTTYPE_SOC:
		return 0;
	case BCMA_HOSTTYPE_PCI:
		if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
			return BCMA_DMA_TRANSLATION_DMA64_CMT;
		else
			return BCMA_DMA_TRANSLATION_DMA32_CMT;
	default:
		bcma_err(core->bus, "DMA translation unknown for host %d\n",
			 core->bus->hosttype);
	}
	return BCMA_DMA_TRANSLATION_NONE;
}
EXPORT_SYMBOL(bcma_core_dma_translation);
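
The functions exported above form the small reset/clock API that bcma client
drivers build on.  What follows is a minimal usage sketch, not part of core.c:
a hypothetical probe/remove pair for a driver bound to an 802.11 core (chosen
so the clockmode call satisfies the WARN_ON in bcma_core_set_clockmode).  The
function names and the choice of passing no extra IOCTL flags are assumptions
made for illustration.

#include <linux/bcma/bcma.h>

static int example_bcma_probe(struct bcma_device *core)
{
	int err;

	/* Run the reset sequence only if the core is not already live */
	if (!bcma_core_is_enabled(core)) {
		err = bcma_core_enable(core, 0);	/* no device-specific IOCTL flags */
		if (err)
			return err;
	}

	/* Request the fast (HT) clock while the driver is active */
	bcma_core_set_clockmode(core, BCMA_CLKMODE_FAST);

	return 0;
}

static void example_bcma_remove(struct bcma_device *core)
{
	/* Fall back to dynamic clocking and put the core back into reset */
	bcma_core_set_clockmode(core, BCMA_CLKMODE_DYNAMIC);
	bcma_core_disable(core, 0);
}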