Revision e9ba4032a2568ef5b360de95eb6e09195a1b2efd authored by Maarten Lankhorst on 17 July 2017, 11:13:55 UTC, committed by Daniel Vetter on 27 July 2017, 20:07:29 UTC
ddb_allocation && ddb_allocation / blocks_per_line >= 1 is the same
as ddb_allocation >= blocks_per_line, so use the latter to simplify
this.

This fixes the following static analysis warning:

[drivers/gpu/drm/i915/intel_pm.c:4467]: (warning) Comparison of a
boolean expression with an integer other than 0 or 1.
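
The equivalence, sketched with plain unsigned integers (a hypothetical
helper for illustration only, not the actual intel_pm.c hunk):

	static inline bool ddb_fits(unsigned int ddb_allocation,
				    unsigned int blocks_per_line)
	{
		/*
		 * For blocks_per_line > 0, integer division gives
		 * x / y >= 1 exactly when x >= y, and x >= y then
		 * implies x != 0, so the "ddb_allocation &&" guard
		 * adds nothing.
		 */
		return ddb_allocation >= blocks_per_line;
	}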

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Fixes: d555cb5827d6 ("drm/i915/skl+: use linetime latency if ddb size is not available")
Cc: "Mahesh Kumar" <mahesh1.kumar@intel.com>
Reported-by: David Binderman <dcb314@hotmail.com>
Cc: David Binderman <dcb314@hotmail.com>
Cc: <drm-intel-fixes@lists.freedesktop.org> # v4.13-rc1+
Reviewed-by: Mahesh Kumar <mahesh1.kumar@intel.com>
(cherry picked from commit 54d20ed1fff23c7d2633f01fc788111bf9c51c5d)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170717111355.4523-1-maarten.lankhorst@linux.intel.com
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
mm_types_task.h
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

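/*
 * Split page table locks: once NR_CPUS reaches CONFIG_SPLIT_PTLOCK_CPUS
 * it pays to lock individual page tables (the lock lives in the
 * corresponding struct page) instead of serializing every PTE/PMD
 * update on mm->page_table_lock.  ALLOC_SPLIT_PTLOCKS is set when the
 * spinlock no longer fits in one word (e.g. with lock debugging) and
 * must be allocated separately rather than embedded in struct page.
 */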
#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)

/*
 * The per-task VMA cache array:
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

struct vmacache {
	u32 seqnum;
	struct vm_area_struct *vmas[VMACACHE_SIZE];
};
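
/*
 * Usage sketch (after mm/vmacache.c; illustrative only): updates store
 * the new VMA in a slot hashed from the address, lookups scan all
 * VMACACHE_SIZE slots, and a seqnum that falls behind the mm's
 * invalidates the whole cache at once:
 *
 *	#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
 *
 *	current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
 */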

enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* Per-thread cached RSS information. */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
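
/*
 * Usage sketch (illustrative; the real helpers live in mm/memory.c and
 * include/linux/mm.h): with SPLIT_RSS_COUNTING, hot paths bump the
 * cheap per-thread counters and fold them into the shared atomics only
 * after a number of events, roughly:
 *
 *	current->rss_stat.count[member] += val;
 *	if (unlikely(++current->rss_stat.events >= SOME_THRESHOLD))
 *		sync_mm_rss(mm);	// fold counts into mm->rss_stat
 *
 * SOME_THRESHOLD is a stand-in for the kernel's actual batching
 * threshold; readers use atomic_long_read() on mm->rss_stat.count[].
 */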

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	/* 64-bit, or 64KiB+ pages where 16 bits would overflow */
	__u32 offset;
	__u32 size;
#else
	/* 32-bit with small pages: 16-bit fields keep the struct compact */
	__u16 offset;
	__u16 size;
#endif
};

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
	 * needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
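
/*
 * Flow sketch (illustrative; the real callers live in mm/rmap.c):
 * during reclaim, unmap clears the PTE, queues the flush, and a single
 * batched flush is issued before the pages can be reused:
 *
 *	pteval = ptep_get_and_clear(mm, addr, pte);
 *	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
 *	tlb_ubc->flush_required = true;
 *	...
 *	arch_tlbbatch_flush(&tlb_ubc->arch);	// one flush for the batch
 */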

#endif /* _LINUX_MM_TYPES_TASK_H */