patch-2.4.17 linux/arch/sparc64/kernel/rtrap.S

diff -Naur -X /home/marcelo/lib/dontdiff linux-2.4.16/arch/sparc64/kernel/rtrap.S linux/arch/sparc64/kernel/rtrap.S
@@ -1,4 +1,4 @@
-/* $Id: rtrap.S,v 1.56 2001/10/13 00:14:34 kanoj Exp $
+/* $Id: rtrap.S,v 1.57 2001/12/06 00:16:11 davem Exp $
  * rtrap.S: Preparing for return from trap on Sparc V9.
  *
  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -18,14 +18,13 @@
 #define		RTRAP_PSTATE_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
 #define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
 
-#if 0
-#define		RTRAP_CHECK		call rtrap_check;  add %sp, (STACK_BIAS+REGWIN_SZ), %o0;
-#else
-#define		RTRAP_CHECK
-#endif
+		/* Register %l6 keeps track of whether we are returning
+		 * from a system call or not.  It is cleared if we call
+		 * do_signal, and it must not be otherwise modified until
+		 * we fully commit to returning to userspace.
+		 */
 
 		.text
-
 		.align			32
 __handle_softirq:
 		call			do_softirq
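
The comment added in the hunk above documents the convention for %l6: it is set on the system-call entry path, cleared whenever do_signal() is invoked, and must not be otherwise modified until the code has fully committed to returning to userspace. As an illustrative sketch only (plain C, with invented names in_syscall, deliver_pending_signals and commit_return_to_user standing in for the register and the assembly paths), the protocol amounts to:

/* Illustrative sketch only: the flag and helper names are invented; the
 * real code keeps this state in register %l6 across the rtrap paths.
 */
#include <stdbool.h>
#include <stdio.h>

static bool in_syscall;                 /* plays the role of %l6 */

static void deliver_pending_signals(void)
{
        /* ... do_signal() would run here ... */
        in_syscall = false;             /* mirrors the "clr %l6" after each do_signal call */
}

static void commit_return_to_user(void)
{
        /* Past this point the return to userspace is committed.  Until
         * here the flag must keep its value; the rtrap code hands it to
         * do_signal() in %o3 (see "mov %l6, %o3" before each call).
         */
        printf("returning to user, from syscall: %d\n", in_syscall);
}

int main(void)
{
        in_syscall = true;              /* set on the system-call entry path */
        deliver_pending_signals();
        commit_return_to_user();
        return 0;
}
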
@@ -34,42 +33,101 @@
 		 nop
 __handle_preemption:
 		call			schedule
-		 nop
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
 		ba,pt			%xcc, __handle_preemption_continue
-		 nop
+		 wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+
 __handle_user_windows:
-		wrpr			%g0, RTRAP_PSTATE, %pstate
 		call			fault_in_user_windows
-		 nop
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
 		ba,pt			%xcc, __handle_user_windows_continue
+		 wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		/* Redo sched+sig checks */
+		ldx			[%g6 + AOFF_task_need_resched], %l0
+		brz,pt			%l0, 1f
+		 nop
+		call			schedule
+
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+1:		lduw			[%g6 + AOFF_task_sigpending], %l0
+		brz,pt			%l0, __handle_user_windows_continue
 		 nop
+		clr			%o0
+		mov			%l5, %o2
+		mov			%l6, %o3
+
+		add			%sp, STACK_BIAS + REGWIN_SZ, %o1
+		call			do_signal
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		clr			%l6
+		/* Signal delivery can modify pt_regs tstate, so we must
+		 * reload it.
+		 */
+		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+		sethi			%hi(0xf << 20), %l4
+		and			%l1, %l4, %l4
+
+		ba,pt			%xcc, __handle_user_windows_continue
+		 andn			%l1, %l4, %l1
 __handle_perfctrs:
-		/* Don't forget to preserve user window invariants. */
-		wrpr			%g0, RTRAP_PSTATE, %pstate
 		call			update_perfctrs
-		 nop
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
 		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
 		ldub			[%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
-		brz,pt			%o2, __handle_perfctrs_continue
-		 sethi			%hi(TSTATE_PEF), %l6
-		wrpr			%g0, RTRAP_PSTATE, %pstate
+		brz,pt			%o2, 1f
+		 nop
 
+		/* Redo userwin+sched+sig checks */
 		call			fault_in_user_windows
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+1:		ldx			[%g6 + AOFF_task_need_resched], %l0
+		brz,pt			%l0, 1f
 		 nop
+		call			schedule
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+1:		lduw			[%g6 + AOFF_task_sigpending], %l0
+		brz,pt			%l0, __handle_perfctrs_continue
+		 sethi			%hi(TSTATE_PEF), %o0
+		clr			%o0
+		mov			%l5, %o2
+		mov			%l6, %o3
+		add			%sp, STACK_BIAS + REGWIN_SZ, %o1
+
+		call			do_signal
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		clr			%l6
+		/* Signal delivery can modify pt_regs tstate, so we must
+		 * reload it.
+		 */
+		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+		sethi			%hi(0xf << 20), %l4
+		and			%l1, %l4, %l4
+		andn			%l1, %l4, %l1
+
 		ba,pt			%xcc, __handle_perfctrs_continue
-		 nop
+		 sethi			%hi(TSTATE_PEF), %o0
 __handle_userfpu:
 		rd			%fprs, %l5
 		andcc			%l5, FPRS_FEF, %g0
+		sethi			%hi(TSTATE_PEF), %o0
 		be,a,pn			%icc, __handle_userfpu_continue
-		 andn			%l1, %l6, %l1
+		 andn			%l1, %o0, %l1
 		ba,a,pt			%xcc, __handle_userfpu_continue
+
 __handle_signal:
 		clr			%o0
 		mov			%l5, %o2
 		mov			%l6, %o3
+		add			%sp, STACK_BIAS + REGWIN_SZ, %o1
 		call			do_signal
-		 add			%sp, STACK_BIAS + REGWIN_SZ, %o1
+		 wrpr			%g0, RTRAP_PSTATE, %pstate
+		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
 		clr			%l6
 
 		/* Signal delivery can modify pt_regs tstate, so we must
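
Each do_signal() call site added in the hunk above is followed by the same sequence: reload PT_V9_TSTATE from the pt_regs save area into %l1 and recompute the 0xf << 20 mask in %l4, because delivering a signal can rewrite the saved tstate. A compile-only C sketch of that rule, with regs_tstate and handle_pending_signals() as invented stand-ins for pt_regs tstate and do_signal():

#include <stdint.h>

/* Stand-in for do_signal(); it may rewrite the saved register state. */
extern void handle_pending_signals(uint64_t *regs_tstate);

void signal_step(uint64_t *regs_tstate)
{
        uint64_t tstate, mask;

        handle_pending_signals(regs_tstate);    /* may modify *regs_tstate        */

        tstate = *regs_tstate;                  /* so reload it (ldx ... into %l1) */
        mask   = tstate & (0xfUL << 20);        /* recompute %l4                   */
        tstate &= ~mask;                        /* andn %l1, %l4, %l1              */

        /* Continue the return path with the fresh values, never with the
         * ones cached before the signal was delivered.
         */
        (void)tstate;
        (void)mask;
}
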
@@ -99,29 +157,43 @@
 		and			%l1, %l4, %l4
 		bne,pn			%icc, to_kernel
 		 andn			%l1, %l4, %l1
-to_user:	ldx			[%g6 + AOFF_task_need_resched], %l0
 
-		brnz,pn			%l0, __handle_preemption
+		/* We must hold IRQs off and atomically test schedule+signal
+		 * state, then hold them off all the way back to userspace.
+		 * If we are returning to kernel, none of this matters.
+		 *
+		 * If we do not do this, there is a window where we would do
+		 * the tests, later the signal/resched event arrives but we do
+		 * not process it since we are still in kernel mode.  It would
+		 * take until the next local IRQ before the signal/resched
+		 * event would be handled.
+		 *
+		 * This also means that if we have to deal with performance
+		 * counters or user windows, we have to redo all of these
+		 * sched+signal checks with IRQs disabled.
+		 */
+to_user:	wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
 __handle_preemption_continue:
+		ldx			[%g6 + AOFF_task_need_resched], %l0
+		brnz,pn			%l0, __handle_preemption
 		 lduw			[%g6 + AOFF_task_sigpending], %l0
 		brnz,pn			%l0, __handle_signal
 		 nop
 __handle_signal_continue:
-check_user_wins:
-		wrpr			%g0, RTRAP_PSTATE_IRQOFF, %pstate
 		ldub			[%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
 		brnz,pn			%o2, __handle_user_windows
-		 sethi			%hi(TSTATE_PEF), %l6
-
+		 nop
 __handle_user_windows_continue:
-		RTRAP_CHECK
 		ldub			[%g6 + AOFF_task_thread + AOFF_thread_flags], %l5
 		andcc			%l5, SPARC_FLAG_PERFCTR, %g0
+		sethi			%hi(TSTATE_PEF), %o0
 		bne,pn			%xcc, __handle_perfctrs
 __handle_perfctrs_continue:
-		 andcc			%l1, %l6, %g0
+		 andcc			%l1, %o0, %g0
+
+		/* This fpdepth clear is neccessary for non-syscall rtraps only */
 		bne,pn			%xcc, __handle_userfpu
-		 stb			%g0, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth] ! This is neccessary for non-syscall rtraps only
+		 stb			%g0, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
 __handle_userfpu_continue:
 
 rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
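
The block comment introduced at to_user is the core of this change: the need_resched and sigpending tests must be made with interrupts disabled, and interrupts must stay disabled all the way back to userspace, otherwise an event arriving between the test and the return sits unhandled until the next local IRQ. Below is a hedged C sketch of the pattern; all helper names are stand-ins, and the loop is a simplification, since the assembly open-codes the re-checks in each slow path (__handle_user_windows, __handle_perfctrs) rather than looping.

#include <stdbool.h>

/* Stand-ins for the real primitives; the assembly masks interrupts by
 * writing RTRAP_PSTATE_IRQOFF / RTRAP_PSTATE into %pstate and reads the
 * task_struct fields directly.
 */
extern void local_irq_disable(void);
extern void local_irq_enable(void);
extern bool resched_needed(void);
extern bool signal_is_pending(void);
extern void schedule(void);
extern void deliver_pending_signals(void);
extern void restore_user_state(void);          /* the committed trap return */

void return_to_user(void)
{
        local_irq_disable();                    /* to_user: IRQs off from here */

        for (;;) {
                if (resched_needed()) {
                        local_irq_enable();     /* schedule() runs with IRQs on */
                        schedule();
                        local_irq_disable();
                        continue;               /* redo every test afterwards */
                }
                if (signal_is_pending()) {
                        local_irq_enable();     /* so does do_signal() */
                        deliver_pending_signals();
                        local_irq_disable();
                        continue;
                }
                break;                          /* nothing pending, IRQs still off */
        }

        /* Because the tests were (re)done with interrupts disabled and they
         * stay disabled across the trap return, a wakeup or signal cannot
         * slip in unnoticed: anything arriving later raises an interrupt
         * that is taken once userspace re-enables interrupt delivery, and
         * is handled on that trap's return path.
         */
        restore_user_state();
}

The same redo applies when fault_in_user_windows() or update_perfctrs() has to run with interrupts re-enabled, which is why the patch duplicates the sched+signal tests in those paths ("Redo sched+sig checks", "Redo userwin+sched+sig checks").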
