Revision e0fa7ab42232e742dcb3de9f3c1f6127b5adc019 authored by Riccardo Mancini on 15 July 2021, 16:07:25 UTC, committed by Arnaldo Carvalho de Melo on 18 July 2021, 12:27:37 UTC
ASan reports some memory leaks when running:

  # perf test "42: BPF filter"

This second leak is caused by a strlist not being deallocated on the
error path inside probe_file__del_events().

This patch adds a goto label before the deallocation and makes the error
path jump to it.
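
The fix follows the usual goto-cleanup pattern: allocate, jump to a
single exit label on failure, and free at that label so both the
success and the error path release the list. A minimal sketch of the
resulting function (the strlist/probe_file helper names follow perf's
internal API; this illustrates the shape of the fix rather than
reproducing the diff verbatim):

int probe_file__del_events(int fd, struct strfilter *filter)
{
	struct strlist *namelist;
	int ret;

	namelist = strlist__new(NULL, NULL);
	if (!namelist)
		return -ENOMEM;

	ret = probe_file__get_events(fd, filter, namelist);
	if (ret < 0)
		goto out;	/* previously an early return, leaking namelist */

	ret = probe_file__del_strlist(fd, namelist);
out:
	strlist__delete(namelist);	/* reached on success and on error */
	return ret;
}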

Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
Fixes: e7895e422e4da63d ("perf probe: Split del_perf_probe_events()")
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/174963c587ae77fa108af794669998e4ae558338.1626343282.git.rickyman7@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
fs_pin.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "internal.h"
#include "mount.h"

static DEFINE_SPINLOCK(pin_lock);

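/*
 * Detach the pin from its mount and superblock lists, mark it done and
 * wake anyone sleeping in pin_kill().
 */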
void pin_remove(struct fs_pin *pin)
{
	spin_lock(&pin_lock);
	hlist_del_init(&pin->m_list);
	hlist_del_init(&pin->s_list);
	spin_unlock(&pin_lock);
	spin_lock_irq(&pin->wait.lock);
	pin->done = 1;
	wake_up_locked(&pin->wait);
	spin_unlock_irq(&pin->wait.lock);
}

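/*
 * Hang a pin off both the superblock's and the mount's pin lists.
 */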
void pin_insert(struct fs_pin *pin, struct vfsmount *m)
{
	spin_lock(&pin_lock);
	hlist_add_head(&pin->s_list, &m->mnt_sb->s_pins);
	hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins);
	spin_unlock(&pin_lock);
}

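/*
 * Kill a pin, or wait for a kill already in progress.  Called with
 * rcu_read_lock() held; it is dropped on every return path.  ->done
 * encodes the pin's state: 0 - live, 1 - pin_remove() has run,
 * -1 - a kill is in progress.
 */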
void pin_kill(struct fs_pin *p)
{
	wait_queue_entry_t wait;

	if (!p) {
		rcu_read_unlock();
		return;
	}
	init_wait(&wait);
	spin_lock_irq(&p->wait.lock);
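	/* Live pin: claim the kill, then run ->kill() with no locks held. */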
	if (likely(!p->done)) {
		p->done = -1;
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		p->kill(p);
		return;
	}
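	/* pin_remove() has already run; nothing left to do. */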
	if (p->done > 0) {
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		return;
	}
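	/* done < 0: another pin_kill() is in flight; sleep until it finishes. */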
	__add_wait_queue(&p->wait, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		schedule();
		rcu_read_lock();
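		/* Auto-removal from the queue means pin_remove() woke us. */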
		if (likely(list_empty(&wait.entry)))
			break;
		/* OK, we know p couldn't have been freed yet */
		spin_lock_irq(&p->wait.lock);
		if (p->done > 0) {
			spin_unlock_irq(&p->wait.lock);
			break;
		}
	}
	rcu_read_unlock();
}

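/*
 * Kill every pin hanging off a mount.  pin_kill() drops the
 * rcu_read_lock() taken on each iteration.
 */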
void mnt_pin_kill(struct mount *m)
{
	while (1) {
		struct hlist_node *p;
		rcu_read_lock();
		p = READ_ONCE(m->mnt_pins.first);
		if (!p) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(p, struct fs_pin, m_list));
	}
}

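/*
 * As above, for pins anchored at an arbitrary hlist_head (such as a
 * superblock's ->s_pins).
 */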
void group_pin_kill(struct hlist_head *p)
{
	while (1) {
		struct hlist_node *q;
		rcu_read_lock();
		q = READ_ONCE(p->first);
		if (!q) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(q, struct fs_pin, s_list));
	}
}