Revision 589cb2c0b8daf4fd03e7012c00a52804769e7699 authored by Rafael J. Wysocki on 30 June 2022, 13:30:30 UTC, committed by Rafael J. Wysocki on 30 June 2022, 13:30:30 UTC
Pull devfreq fixes for 5.19-rc5 from Chanwoo Choi:

"1. Fix devfreq passive governor issue when cpufreq policies are not
    ready during kernel boot because some CPUs turn on after kernel
    booting or others.

    - Re-initialize the variables of struct devfreq_passive_data when
      PROBE_DEFER happens because cpufreq_get() returns NULL.

    - Use dev_err_probe() to mute the warning on PROBE_DEFER.

    - Fix cpufreq passive unregister erroring on PROBE_DEFER by using
      the allocated parent_cpu_data list to free resources instead of
      for_each_possible_cpu().

    - Remove the duplicate cpufreq passive unregister call and the
      warning on PROBE_DEFER.

    - Use the HZ_PER_KHZ macro from units.h.

    - Fix the wrong indentation of the SPDX-License line.

 2. Fix reference count leak in exynos-ppmu.c by using of_node_put().

 3. Rework freq_table to be local to the devfreq struct.

    - struct devfreq_dev_profile includes a freq_table array to store
      the supported frequencies. If a devfreq driver doesn't initialize
      freq_table, the devfreq core allocates and initializes it.

      On a devfreq PROBE_DEFER, the freq_table in the driver's profile
      struct is never reset and may be left in an undefined state. To fix
      this and handle PROBE_DEFER correctly, use a local freq_table and
      max_state in the devfreq struct (sketched below)."
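
   For context, a simplified sketch of the resulting layout (abridged;
   only the members relevant to the fix are shown, all others elided):

	struct devfreq {
		/* ... */
		struct devfreq_dev_profile *profile;
		/*
		 * Local copies owned by the devfreq core, so a deferred
		 * probe no longer leaves stale state in the driver-owned
		 * profile:
		 */
		unsigned long *freq_table;
		unsigned int max_state;
		/* ... */
	};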

* tag 'devfreq-fixes-for-5.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux:
  PM / devfreq: passive: revert an editing accident in SPDX-License line
  PM / devfreq: Fix kernel warning with cpufreq passive register fail
  PM / devfreq: Rework freq_table to be local to devfreq struct
  PM / devfreq: exynos-ppmu: Fix refcount leak in of_get_devfreq_events
  PM / devfreq: passive: Use HZ_PER_KHZ macro in units.h
  PM / devfreq: Fix cpufreq passive unregister erroring on PROBE_DEFER
  PM / devfreq: Mute warning on governor PROBE_DEFER
  PM / devfreq: Fix kernel panic with cpu based scaling to passive gov
2 parents 03c765b + f08fe6f
blk-cgroup-rwstat.h
/* SPDX-License-Identifier: GPL-2.0
 *
 * Legacy blkg rwstat helpers enabled by CONFIG_BLK_CGROUP_RWSTAT.
 * Do not use in new code.
 */
#ifndef _BLK_CGROUP_RWSTAT_H
#define _BLK_CGROUP_RWSTAT_H

#include "blk-cgroup.h"

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

/*
 * blkg_rwstat->aux_cnt is excluded for local stats but included for
 * recursive ones.  Used to carry stats of dead children.
 */
struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

struct blkg_rwstat_sample {
	u64				cnt[BLKG_RWSTAT_NR];
};

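/**
 * blkg_rwstat_read_counter - read one counter of a blkg_rwstat
 * @rwstat: blkg_rwstat to read from
 * @idx: index of the counter to read (one of enum blkg_rwstat_type)
 *
 * Return the current value of counter @idx: the percpu sum plus the
 * aux count carried over from dead children.
 */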
static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
		unsigned int idx)
{
	return atomic64_read(&rwstat->aux_cnt[idx]) +
		percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
}

int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp);
void blkg_rwstat_exit(struct blkg_rwstat *rwstat);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat_sample *rwstat);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
		int off, struct blkg_rwstat_sample *sum);


/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
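
/*
 * Example (hypothetical caller): accounting a 4KB synchronous write.
 * op_is_write() routes the value to the WRITE counter and op_is_sync()
 * additionally bumps the SYNC counter:
 *
 *	blkg_rwstat_add(rwstat, REQ_OP_WRITE | REQ_SYNC, 4096);
 */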

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 * @result: where to store the snapshot
 *
 * Read the current snapshot of @rwstat's percpu counters into @result.
 */
static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
		struct blkg_rwstat_sample *result)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		result->cnt[i] =
			percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat_sample tmp = { };

	blkg_rwstat_read(rwstat, &tmp);
	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}
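
/*
 * Example (hypothetical): when a child blkg is torn down, its counts,
 * local and inherited alike, can be folded into the parent so that the
 * parent's recursive stats keep accounting for the dead child:
 *
 *	blkg_rwstat_add_aux(&parent_stat, &child_stat);
 */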
#endif	/* _BLK_CGROUP_RWSTAT_H */