Revision 082cd4ec240b8734a82a89ffb890216ac98fec68 authored by Ye Bin on 06 May 2021, 14:10:42 UTC, committed by Theodore Ts'o on 06 June 2021, 14:09:55 UTC
We hit the following BUG_ON when running fsstress with I/O fault injection:
[130747.323114] kernel BUG at fs/ext4/extents_status.c:762!
[130747.323117] Internal error: Oops - BUG: 0 [#1] SMP
......
[130747.334329] Call trace:
[130747.334553]  ext4_es_cache_extent+0x150/0x168 [ext4]
[130747.334975]  ext4_cache_extents+0x64/0xe8 [ext4]
[130747.335368]  ext4_find_extent+0x300/0x330 [ext4]
[130747.335759]  ext4_ext_map_blocks+0x74/0x1178 [ext4]
[130747.336179]  ext4_map_blocks+0x2f4/0x5f0 [ext4]
[130747.336567]  ext4_mpage_readpages+0x4a8/0x7a8 [ext4]
[130747.336995]  ext4_readpage+0x54/0x100 [ext4]
[130747.337359]  generic_file_buffered_read+0x410/0xae8
[130747.337767]  generic_file_read_iter+0x114/0x190
[130747.338152]  ext4_file_read_iter+0x5c/0x140 [ext4]
[130747.338556]  __vfs_read+0x11c/0x188
[130747.338851]  vfs_read+0x94/0x150
[130747.339110]  ksys_read+0x74/0xf0

This patch follows Jan Kara's suggestion in:
https://patchwork.ozlabs.org/project/linux-ext4/patch/20210428085158.3728201-1-yebin10@huawei.com/
"I see. Now I understand your patch. Honestly, seeing how fragile is trying
to fix extent tree after split has failed in the middle, I would probably
go even further and make sure we fix the tree properly in case of ENOSPC
and EDQUOT (those are easily user triggerable).  Anything else indicates a
HW problem or fs corruption so I'd rather leave the extent tree as is and
don't try to fix it (which also means we will not create overlapping
extents)."

Cc: stable@kernel.org
Signed-off-by: Ye Bin <yebin10@huawei.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20210506141042.3298679-1-yebin10@huawei.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
cbc.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CBC: Cipher Block Chaining mode
 *
 * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>

static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;
	u8 *iv = walk->iv;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_encrypt;

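	/*
	 * Classic CBC chaining: C[i] = E(P[i] ^ C[i-1]), with the IV
	 * standing in for C[-1].  The XOR is accumulated in the IV buffer,
	 * the block cipher writes the ciphertext to dst, and that
	 * ciphertext is copied back to become the IV for the next block.
	 */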
	do {
		crypto_xor(iv, src, bsize);
		fn(tfm, dst, iv);
		memcpy(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;
	u8 *iv = walk->iv;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_encrypt;

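	/*
	 * In-place variant: src and dst alias, so XOR the IV into the
	 * plaintext and encrypt it in place; the freshly written ciphertext
	 * block then serves directly as the next IV, deferring the copy
	 * into walk->iv until the loop ends.
	 */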
	do {
		crypto_xor(src, iv, bsize);
		fn(tfm, src, src);
		iv = src;

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

static int crypto_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

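	/*
	 * Each walk step exposes one virtually-contiguous chunk; the
	 * helpers return the sub-block remainder, which
	 * skcipher_walk_done() uses to advance (or finish) the walk.
	 */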
	while (walk.nbytes) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			err = crypto_cbc_encrypt_inplace(&walk, skcipher);
		else
			err = crypto_cbc_encrypt_segment(&walk, skcipher);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}

static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;
	u8 *iv = walk->iv;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_decrypt;

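	/*
	 * CBC decryption: P[i] = D(C[i]) ^ C[i-1].  Decrypt into dst first,
	 * then XOR with the previous ciphertext block (the IV for the first
	 * block); the current src block becomes the IV for the next one.
	 */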
	do {
		fn(tfm, dst, src);
		crypto_xor(dst, iv, bsize);
		iv = src;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}

static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *skcipher)
{
	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 last_iv[MAX_CIPHER_BLOCKSIZE];
	struct crypto_cipher *cipher;
	struct crypto_tfm *tfm;

	cipher = skcipher_cipher_simple(skcipher);
	tfm = crypto_cipher_tfm(cipher);
	fn = crypto_cipher_alg(cipher)->cia_decrypt;

	/* Start of the last block. */
	src += nbytes - (nbytes & (bsize - 1)) - bsize;
	memcpy(last_iv, src, bsize);

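	/*
	 * Walk backwards: decrypting in place destroys ciphertext that the
	 * following block still needs as its chaining value, so blocks are
	 * processed last-to-first, and last_iv preserves the final
	 * ciphertext block for walk->iv.
	 */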
	for (;;) {
		fn(tfm, src, src);
		if ((nbytes -= bsize) < bsize)
			break;
		crypto_xor(src, src - bsize, bsize);
		src -= bsize;
	}

	crypto_xor(src, walk->iv, bsize);
	memcpy(walk->iv, last_iv, bsize);

	return nbytes;
}

static int crypto_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			err = crypto_cbc_decrypt_inplace(&walk, skcipher);
		else
			err = crypto_cbc_decrypt_segment(&walk, skcipher);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}

static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	alg = skcipher_ialg_simple(inst);

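	/*
	 * The in-place decrypt path masks with nbytes & (bsize - 1), so
	 * only power-of-two block sizes are supported.
	 */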
	err = -EINVAL;
	if (!is_power_of_2(alg->cra_blocksize))
		goto out_free_inst;

	inst->alg.encrypt = crypto_cbc_encrypt;
	inst->alg.decrypt = crypto_cbc_decrypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_free_inst:
		inst->free(inst);
	}

	return err;
}

static struct crypto_template crypto_cbc_tmpl = {
	.name = "cbc",
	.create = crypto_cbc_create,
	.module = THIS_MODULE,
};

static int __init crypto_cbc_module_init(void)
{
	return crypto_register_template(&crypto_cbc_tmpl);
}

static void __exit crypto_cbc_module_exit(void)
{
	crypto_unregister_template(&crypto_cbc_tmpl);
}

subsys_initcall(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cbc");
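
For reference, a minimal sketch of how a "cbc(aes)" instance built by this
template is typically driven from other kernel code (synchronous wait, most
error handling trimmed; the key, IV and buffer contents are placeholders,
and the caller would also need <crypto/skcipher.h> and
<linux/scatterlist.h>):

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[16] = {}, iv[16] = {}, buf[32] = {};

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_skcipher_setkey(tfm, key, sizeof(key));

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	/* buf must be a whole number of blocks: CBC does no padding. */
	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);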