Revision dc280d93623927570da279e99393879dbbab39e7 authored by Thomas Gleixner on 21 December 2016, 19:19:49 UTC, committed by Thomas Gleixner on 25 December 2016, 09:47:42 UTC
Developers manage to overwrite states blindly without thought. That's fatal
and hard to debug. Add sanity checks to make it fail.

This requires restructuring the code so that the dynamic state allocation
happens in the same lock protected section as the actual store. Otherwise
the previous assignment of 'Reserved' to the name field would trigger the
overwrite check.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Link: http://lkml.kernel.org/r/20161221192111.675234535@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

1 parent 59fefd0
Raw File
fixup.S
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 *  Copyright (C) 2004  Randolph Chung <tausq@debian.org>
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 * 
 * Fixup routines for kernel exception handling.
 */
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>

#ifdef CONFIG_SMP
	/*
	 * get_fault_ip \t1 \t2 - SMP variant.
	 *
	 * Loads the saved fixup address (exception_data.fault_ip) for the
	 * current CPU into \t1 and restores the kernel gp (%r27) from
	 * exception_data.fault_gp.  \t2 is used as scratch.
	 *
	 * Note: addil places its result in %r1 (PA-RISC architectural
	 * behavior), which the following LDREG then uses as base.
	 */
	.macro  get_fault_ip t1 t2
	loadgp
	/* t1 = __per_cpu_offset[] base (addil result lands in %r1) */
	addil LT%__per_cpu_offset,%r27
	LDREG RT%__per_cpu_offset(%r1),\t1
	/* t2 = smp_processor_id() */
	mfctl 30,\t2
	ldw TI_CPU(\t2),\t2
#ifdef CONFIG_64BIT
	/* zero-extend the 32-bit cpu number to 64 bits */
	extrd,u \t2,63,32,\t2
#endif
	/* t2 = &__per_cpu_offset[smp_processor_id()]; */
	LDREGX \t2(\t1),\t2 
	addil LT%exception_data,%r27
	LDREG RT%exception_data(%r1),\t1
	/* t1 = this_cpu_ptr(&exception_data) */
	add,l \t1,\t2,\t1
	/* %r27 = t1->fault_gp - restore gp */
	LDREG EXCDATA_GP(\t1), %r27
	/* t1 = t1->fault_ip */
	LDREG EXCDATA_IP(\t1), \t1
	.endm
#else
	/*
	 * get_fault_ip \t1 \t2 - UP variant.
	 *
	 * Single instance of exception_data, so no per-cpu offset lookup
	 * is needed.  Same outputs as the SMP variant: \t1 = fault_ip,
	 * %r27 = fault_gp; \t2 is scratch.
	 */
	.macro  get_fault_ip t1 t2
	loadgp
	/* t1 = this_cpu_ptr(&exception_data) */
	addil LT%exception_data,%r27
	LDREG RT%exception_data(%r1),\t2
	/* %r27 = t2->fault_gp - restore gp */
	LDREG EXCDATA_GP(\t2), %r27
	/* t1 = t2->fault_ip */
	LDREG EXCDATA_IP(\t2), \t1
	.endm
#endif

	/* LEVEL is the PA-RISC ISA level — presumably set by the included
	 * headers (asm/assembly.h); confirm against the build config. */
	.level LEVEL

	.text
	/* Fixup handlers live in the .fixup section ("ax" = alloc+exec),
	 * reached via the kernel exception tables. */
	.section .fixup, "ax"

	/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
ENTRY_CFI(fixup_get_user_skip_1)
	/*
	 * Fault fixup for get_user(): resume 1 instruction (4 bytes)
	 * past the recorded fault IP with %r8 = -EFAULT and %r9 = 0.
	 */
	get_fault_ip %r1,%r8		/* %r1 = fault IP, gp restored */
	ldo 4(%r1), %r1			/* skip the single faulting insn */
	ldi -EFAULT, %r8		/* error code for the caller */
	bv %r0(%r1)			/* branch back to fixed-up IP... */
	copy %r0, %r9			/* ...%r9 = 0 in the delay slot */
ENDPROC_CFI(fixup_get_user_skip_1)

ENTRY_CFI(fixup_get_user_skip_2)
	/*
	 * Fault fixup for get_user(): resume 2 instructions (8 bytes)
	 * past the recorded fault IP with %r8 = -EFAULT and %r9 = 0.
	 */
	get_fault_ip %r1,%r8		/* %r1 = fault IP, gp restored */
	ldo 8(%r1), %r1			/* skip the two faulting insns */
	ldi -EFAULT, %r8		/* error code for the caller */
	bv %r0(%r1)			/* branch back to fixed-up IP... */
	copy %r0, %r9			/* ...%r9 = 0 in the delay slot */
ENDPROC_CFI(fixup_get_user_skip_2)

	/* put_user() fixups, store -EFAULT in r8 */
ENTRY_CFI(fixup_put_user_skip_1)
	/*
	 * Fault fixup for put_user(): resume 1 instruction (4 bytes)
	 * past the recorded fault IP with %r8 = -EFAULT (%r9 untouched).
	 */
	get_fault_ip %r1,%r8		/* %r1 = fault IP, gp restored */
	ldo 4(%r1), %r1			/* skip the single faulting insn */
	bv %r0(%r1)			/* branch back to fixed-up IP... */
	ldi -EFAULT, %r8		/* ...error code set in delay slot */
ENDPROC_CFI(fixup_put_user_skip_1)

ENTRY_CFI(fixup_put_user_skip_2)
	/*
	 * Fault fixup for put_user(): resume 2 instructions (8 bytes)
	 * past the recorded fault IP with %r8 = -EFAULT (%r9 untouched).
	 */
	get_fault_ip %r1,%r8		/* %r1 = fault IP, gp restored */
	ldo 8(%r1), %r1			/* skip the two faulting insns */
	bv %r0(%r1)			/* branch back to fixed-up IP... */
	ldi -EFAULT, %r8		/* ...error code set in delay slot */
ENDPROC_CFI(fixup_put_user_skip_2)

back to top