Revision a742994aa2e271eb8cd8e043d276515ec858ed73 authored by Filipe Manana on 13 February 2015, 16:56:14 UTC, committed by Chris Mason on 14 February 2015, 16:22:49 UTC
If we are recording in the tree log that an inode has new names (new hard
links were added), we would drop items, belonging to the inode, that we
shouldn't:

1) When the flag BTRFS_INODE_NEEDS_FULL_SYNC is set in the inode's runtime
   flags, we ended up dropping all the extent and xattr items that were
   previously logged. This was done only in memory, since logging a new
   name doesn't imply syncing the log;

2) When the flag BTRFS_INODE_COPY_EVERYTHING is set in the inode's runtime
   flags, we ended up dropping all the xattr items that were previously
   logged. Like the case before, this was done only in memory because
   logging a new name doesn't imply syncing the log.

This led to some surprises in scenarios such as the following:

1) write some extents to an inode;
2) fsync the inode;
3) truncate the inode or delete/modify some of its xattrs;
4) add a new hard link for that inode;
5) fsync some other file, to force the log tree to be durably persisted;
6) power failure happens.

The next time the fs is mounted, the fsync log replay code is executed,
and the resulting file doesn't have the content it had when the last fsync
against it was performed; instead, its content matches what it had
when the last transaction commit happened.

So change the behaviour such that when a new name is logged, only the inode
item and reference items are processed.
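
In rough terms, the fix caps the key range that btrfs_log_inode() copies
into (and drops from) the log tree when it is called only to record a new
name (inode_only == LOG_INODE_EXISTS). The snippet below is only an
illustrative sketch of that idea, not the actual patch hunk; it uses real
identifiers from fs/btrfs/tree-log.c (min_key/max_key,
drop_objectid_items(), the BTRFS_*_KEY constants) but simplifies the
surrounding logic:

  /*
   * Sketch only. The key types for an inode sort as:
   *   BTRFS_INODE_ITEM_KEY < BTRFS_INODE_REF_KEY <
   *   BTRFS_INODE_EXTREF_KEY < BTRFS_XATTR_ITEM_KEY <
   *   BTRFS_EXTENT_DATA_KEY
   * so capping the range at BTRFS_INODE_EXTREF_KEY covers the inode
   * item and its reference items only.
   */
  min_key.objectid = ino;
  min_key.type = BTRFS_INODE_ITEM_KEY;
  min_key.offset = 0;

  max_key.objectid = ino;
  max_key.offset = (u64)-1;
  if (inode_only == LOG_INODE_EXISTS)
          max_key.type = BTRFS_INODE_EXTREF_KEY;  /* new name only */
  else
          max_key.type = (u8)-1;                  /* full inode logging */

  if (inode_only == LOG_INODE_EXISTS)
          ret = drop_objectid_items(trans, log, path, ino,
                                    BTRFS_INODE_EXTREF_KEY);

Since xattr and extent items are no longer dropped on this path, logging a
new name can no longer throw away metadata that a previous fsync already
put in the log.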

This is easy to reproduce with the test I just made for xfstests, whose
main body is:

  _scratch_mkfs >> $seqres.full 2>&1
  _init_flakey
  _mount_flakey

  # Create our test file with some data.
  $XFS_IO_PROG -f -c "pwrite -S 0xaa -b 8K 0 8K" \
      $SCRATCH_MNT/foo | _filter_xfs_io

  # Make sure the file is durably persisted.
  sync

  # Append some data to our file, to increase its size.
  $XFS_IO_PROG -f -c "pwrite -S 0xcc -b 4K 8K 4K" \
      $SCRATCH_MNT/foo | _filter_xfs_io

  # Fsync the file, so from this point on if a crash/power failure happens, our
  # new data is guaranteed to be there next time the fs is mounted.
  $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/foo

  # Now shrink our file to 5000 bytes.
  $XFS_IO_PROG -c "truncate 5000" $SCRATCH_MNT/foo

  # Now do an expanding truncate to a size larger than what we had when we last
  # fsync'ed our file. This is just to verify that after power failure and
  # replaying the fsync log, our file matches what it was when we last fsync'ed
  # it - 12Kb size, first 8Kb of data had a value of 0xaa and the last 4Kb of
  # data had a value of 0xcc.
  $XFS_IO_PROG -c "truncate 32K" $SCRATCH_MNT/foo

  # Add one hard link to our file. This made btrfs drop all of our file's
  # metadata from the fsync log, including the metadata relative to the
  # extent we just wrote and fsync'ed. This change was made only to the fsync
  # log in memory, so adding the hard link alone doesn't change the persisted
  # fsync log. This happened because the previous truncates set the runtime
  # flag BTRFS_INODE_NEEDS_FULL_SYNC in the btrfs inode structure.
  ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link

  # Now make sure the in memory fsync log is durably persisted.
  # Creating and fsync'ing another file will do it.
  # After this our persisted fsync log will no longer have metadata for our file
  # foo that points to the extent we wrote and fsync'ed before.
  touch $SCRATCH_MNT/bar
  $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/bar

  # As expected, before the crash/power failure, we should be able to see a file
  # with a size of 32Kb, with its first 5000 bytes having the value 0xaa and all
  # the remaining bytes with value 0x00.
  echo "File content before:"
  od -t x1 $SCRATCH_MNT/foo

  # Simulate a crash/power loss.
  _load_flakey_table $FLAKEY_DROP_WRITES
  _unmount_flakey

  _load_flakey_table $FLAKEY_ALLOW_WRITES
  _mount_flakey

  # After mounting the fs again, the fsync log was replayed.
  # The expected result is to see a file with a size of 12Kb, with its first 8Kb
  # of data having the value 0xaa and its last 4Kb of data having a value of 0xcc.
  # The btrfs bug used to leave the file as it used to be as of the last
  # transaction commit - that is, with a size of 8Kb with all bytes having a
  # value of 0xaa.
  echo "File content after:"
  od -t x1 $SCRATCH_MNT/foo

The test case for xfstests follows soon.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
1 parent 1a4bcf4
bug.c
/*
  Generic support for BUG()

  This respects the following config options:

  CONFIG_BUG - emit BUG traps.  Nothing happens without this.
  CONFIG_GENERIC_BUG - enable this code.
  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
	the containing struct bug_entry for bug_addr and file.
  CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG

  CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
  (though they're generally always on).

  CONFIG_GENERIC_BUG is set by each architecture using this code.

  To use this, your architecture must:

  1. Set up the config options:
     - Enable CONFIG_GENERIC_BUG if CONFIG_BUG

  2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
     - Define HAVE_ARCH_BUG
     - Implement BUG() to generate a faulting instruction
     - NOTE: struct bug_entry does not have "file" or "line" entries
       when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
       the values accordingly.

  3. Implement the trap
     - In the illegal instruction trap handler (typically), verify
       that the fault was in kernel mode, and call report_bug()
     - report_bug() will return whether it was a false alarm, a warning,
       or an actual bug.
     - You must implement the is_valid_bugaddr(bugaddr) callback which
       returns true if the eip is a real kernel address, and it points
       to the expected BUG trap instruction.

    Jeremy Fitzhardinge <jeremy@goop.org> 2006
 */
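
/*
 * Illustrative sketch only (not part of this file): roughly what an
 * architecture wiring into this code provides, following the steps
 * described above. The trapping instruction, BUG_TRAP_INSN_LEN,
 * regs->ip and die() are placeholders for whatever the architecture
 * really uses; report_bug(), is_valid_bugaddr() and the
 * BUG_TRAP_TYPE_* values are the real interfaces declared in
 * <linux/bug.h> / <asm-generic/bug.h>.
 */
#if 0
#define HAVE_ARCH_BUG
#define BUG()							\
do {								\
	asm volatile("<some trapping instruction>");		\
	unreachable();						\
} while (0)

/* Callback used by report_bug() to validate the faulting address. */
int is_valid_bugaddr(unsigned long addr)
{
	/* A real architecture checks for a kernel-text address here. */
	return addr >= TASK_SIZE;
}

/* Called from the illegal-instruction trap handler, kernel mode only. */
static void handle_bug_trap(struct pt_regs *regs, unsigned long bugaddr)
{
	enum bug_trap_type type = report_bug(bugaddr, regs);

	switch (type) {
	case BUG_TRAP_TYPE_WARN:
		/* A WARN(): skip the trap instruction and keep running. */
		regs->ip += BUG_TRAP_INSN_LEN;
		break;
	case BUG_TRAP_TYPE_BUG:
		die("kernel BUG", regs);	/* does not return */
		break;
	case BUG_TRAP_TYPE_NONE:
		/* Not ours - fall back to the normal illegal-op path. */
		break;
	}
}
#endif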

#define pr_fmt(fmt) fmt

#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>

extern const struct bug_entry __start___bug_table[], __stop___bug_table[];

static inline unsigned long bug_addr(const struct bug_entry *bug)
{
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	return bug->bug_addr;
#else
	return (unsigned long)bug + bug->bug_addr_disp;
#endif
}

#ifdef CONFIG_MODULES
/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);

static const struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	struct module *mod;
	const struct bug_entry *bug = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
		unsigned i;

		bug = mod->bug_table;
		for (i = 0; i < mod->num_bugs; ++i, ++bug)
			if (bugaddr == bug_addr(bug))
				goto out;
	}
	bug = NULL;
out:
	rcu_read_unlock();

	return bug;
}

void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			 struct module *mod)
{
	char *secstrings;
	unsigned int i;

	mod->bug_table = NULL;
	mod->num_bugs = 0;

	/* Find the __bug_table section, if present */
	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (i = 1; i < hdr->e_shnum; i++) {
		if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
			continue;
		mod->bug_table = (void *) sechdrs[i].sh_addr;
		mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
		break;
	}

	/*
	 * Strictly speaking this should have a spinlock to protect against
	 * traversals, but since we only traverse on BUG()s, a spinlock
	 * could potentially lead to deadlock and thus be counter-productive.
	 * Thus, this uses RCU to safely manipulate the bug list, since BUG
	 * must run in non-interruptive state.
	 */
	list_add_rcu(&mod->bug_list, &module_bug_list);
}

void module_bug_cleanup(struct module *mod)
{
	list_del_rcu(&mod->bug_list);
}

#else

static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	return NULL;
}
#endif

const struct bug_entry *find_bug(unsigned long bugaddr)
{
	const struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug_addr(bug))
			return bug;

	return module_find_bug(bugaddr);
}

enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
	const struct bug_entry *bug;
	const char *file;
	unsigned line, warning;

	if (!is_valid_bugaddr(bugaddr))
		return BUG_TRAP_TYPE_NONE;

	bug = find_bug(bugaddr);

	file = NULL;
	line = 0;
	warning = 0;

	if (bug) {
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
		file = bug->file;
#else
		file = (const char *)bug + bug->file_disp;
#endif
		line = bug->line;
#endif
		warning = (bug->flags & BUGFLAG_WARNING) != 0;
	}

	if (warning) {
		/* this is a WARN_ON rather than BUG/BUG_ON */
		pr_warn("------------[ cut here ]------------\n");

		if (file)
			pr_warn("WARNING: at %s:%u\n", file, line);
		else
			pr_warn("WARNING: at %p [verbose debug info unavailable]\n",
				(void *)bugaddr);

		print_modules();
		show_regs(regs);
		print_oops_end_marker();
		/* Just a warning, don't kill lockdep. */
		add_taint(BUG_GET_TAINT(bug), LOCKDEP_STILL_OK);
		return BUG_TRAP_TYPE_WARN;
	}

	printk(KERN_DEFAULT "------------[ cut here ]------------\n");

	if (file)
		pr_crit("kernel BUG at %s:%u!\n", file, line);
	else
		pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
			(void *)bugaddr);

	return BUG_TRAP_TYPE_BUG;
}