Revision 787252a10d9422f3058df9a4821f389e5326c440 authored by Nathan Lynch on 15 October 2021, 17:39:02 UTC, committed by Michael Ellerman on 20 October 2021, 10:38:01 UTC
With PREEMPT_COUNT=y, when a CPU is offlined and then onlined again, we
get:

BUG: scheduling while atomic: swapper/1/0/0x00000000
no locks held by swapper/1/0.
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 5.15.0-rc2+ #100
Call Trace:
 dump_stack_lvl+0xac/0x108
 __schedule_bug+0xac/0xe0
 __schedule+0xcf8/0x10d0
 schedule_idle+0x3c/0x70
 do_idle+0x2d8/0x4a0
 cpu_startup_entry+0x38/0x40
 start_secondary+0x2ec/0x3a0
 start_secondary_prolog+0x10/0x14

This is because powerpc's arch_cpu_idle_dead() decrements the idle task's
preempt count, for reasons explained in commit a7c2bb8279d2 ("powerpc:
Re-enable preemption before cpu_die()"), specifically "start_secondary()
expects a preempt_count() of 0."

However, since commit 2c669ef6979c ("powerpc/preempt: Don't touch the idle
task's preempt_count during hotplug") and commit f1a0a376ca0c ("sched/core:
Initialize the idle task with preemption disabled"), that justification no
longer holds.

The idle task isn't supposed to re-enable preemption, so remove the
vestigial preempt_enable() from the CPU offline path.

Tested with pseries and powernv in qemu, and pseries on PowerVM.
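
The fix itself is the removal of the vestigial preempt-enable call from
arch_cpu_idle_dead() in arch/powerpc/kernel/smp.c. A sketch of the shape
of the change (the removed call is likely sched_preempt_enable_no_resched()
and the surrounding context is reconstructed from the 5.15-era source, so
treat both as assumptions rather than the literal diff):

 void arch_cpu_idle_dead(void)
 {
-	sched_preempt_enable_no_resched();
-
 	/*
 	 * Disable on the down path. This will be re-enabled by
 	 * start_secondary() via start_secondary_resume() below
 	 */
 	this_cpu_disable_ftrace();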

Fixes: 2c669ef6979c ("powerpc/preempt: Don't touch the idle task's preempt_count during hotplug")
Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211015173902.2278118-1-nathanl@linux.ibm.com

1 parent 496c5fe
michael_mic.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API
 *
 * Michael MIC (IEEE 802.11i/TKIP) keyed digest
 *
 * Copyright (c) 2004 Jouni Malinen <j@w1.fi>
 */
#include <crypto/internal/hash.h>
#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>

struct michael_mic_ctx {
	u32 l, r;
};

struct michael_mic_desc_ctx {
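	/* up to 4 bytes of input buffered until a full 32-bit word arrives */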
	__le32 pending;
	size_t pending_len;

	u32 l, r;
};

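/*
 * Swap the two bytes within each 16-bit half of the word: the XSWAP
 * step of the Michael block function.
 */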
static inline u32 xswap(u32 val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

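/*
 * One application of the Michael block function b(l, r), performed after
 * each 32-bit message word has been XORed into l.
 */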
#define michael_block(l, r)	\
do {				\
	r ^= rol32(l, 17);	\
	l += r;			\
	r ^= xswap(l);		\
	l += r;			\
	r ^= rol32(l, 3);	\
	l += r;			\
	r ^= ror32(l, 2);	\
	l += r;			\
} while (0)

static int michael_init(struct shash_desc *desc)
{
	struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
	struct michael_mic_ctx *ctx = crypto_shash_ctx(desc->tfm);

	mctx->pending_len = 0;
	mctx->l = ctx->l;
	mctx->r = ctx->r;

	return 0;
}

static int michael_update(struct shash_desc *desc, const u8 *data,
			   unsigned int len)
{
	struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);

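	/* First top up a partial word buffered by a previous update call */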
	if (mctx->pending_len) {
		int flen = 4 - mctx->pending_len;

		if (flen > len)
			flen = len;
		memcpy((u8 *)&mctx->pending + mctx->pending_len, data, flen);
		mctx->pending_len += flen;
		data += flen;
		len -= flen;

		if (mctx->pending_len < 4)
			return 0;

		mctx->l ^= le32_to_cpu(mctx->pending);
		michael_block(mctx->l, mctx->r);
		mctx->pending_len = 0;
	}

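	/* Mix in full 32-bit little-endian words straight from the input */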
	while (len >= 4) {
		mctx->l ^= get_unaligned_le32(data);
		michael_block(mctx->l, mctx->r);
		data += 4;
		len -= 4;
	}

	if (len > 0) {
		mctx->pending_len = len;
		memcpy(&mctx->pending, data, len);
	}

	return 0;
}

static int michael_final(struct shash_desc *desc, u8 *out)
{
	struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
	u8 *data = (u8 *)&mctx->pending;

	/* Last block and padding (0x5a, 4..7 x 0) */
	switch (mctx->pending_len) {
	case 0:
		mctx->l ^= 0x5a;
		break;
	case 1:
		mctx->l ^= data[0] | 0x5a00;
		break;
	case 2:
		mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000;
		break;
	case 3:
		mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) |
			0x5a000000;
		break;
	}
	michael_block(mctx->l, mctx->r);
	/* l ^= 0; the second padding word is all-zero, so the XOR is a no-op */
	michael_block(mctx->l, mctx->r);

	put_unaligned_le32(mctx->l, out);
	put_unaligned_le32(mctx->r, out + 4);

	return 0;
}

static int michael_setkey(struct crypto_shash *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm);

	if (keylen != 8)
		return -EINVAL;

	mctx->l = get_unaligned_le32(key);
	mctx->r = get_unaligned_le32(key + 4);
	return 0;
}

static struct shash_alg alg = {
	.digestsize		=	8,
	.setkey			=	michael_setkey,
	.init			=	michael_init,
	.update			=	michael_update,
	.final			=	michael_final,
	.descsize		=	sizeof(struct michael_mic_desc_ctx),
	.base			=	{
		.cra_name		=	"michael_mic",
		.cra_driver_name	=	"michael_mic-generic",
		.cra_blocksize		=	8,
		.cra_ctxsize		=	sizeof(struct michael_mic_ctx),
		.cra_module		=	THIS_MODULE,
	}
};

static int __init michael_mic_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit michael_mic_exit(void)
{
	crypto_unregister_shash(&alg);
}

subsys_initcall(michael_mic_init);
module_exit(michael_mic_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Michael MIC");
MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
MODULE_ALIAS_CRYPTO("michael_mic");
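
For reference, michael_mic is driven through the kernel's shash API like
any other keyed digest. A minimal sketch of a caller (michael_mic_demo()
is a hypothetical helper written for illustration, not part of this file):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/types.h>

/* Hypothetical demo: compute the 8-byte Michael MIC of a buffer. */
static int michael_mic_demo(const u8 key[8], const u8 *data,
			    unsigned int len, u8 out[8])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* michael_setkey() rejects any key that is not exactly 8 bytes */
	err = crypto_shash_setkey(tfm, key, 8);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}

Since .digestsize is 8, out must have room for exactly 8 bytes.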