@@ -37,7 +37,6 @@ extern struct thread_info *current_set[NR_CPUS];
#define prepare_arch_switch(next) do { \
__asm__ __volatile__( \
- ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
"save %sp, -0x40, %sp\n\t" \
@@ -384,11 +384,8 @@ do_flush_windows:
RESTORE_ALL
- .globl flush_patch_one
-
/* We get these for debugging routines using __builtin_return_address() */
dfw_kernel:
-flush_patch_one:
FLUSH_ALL_KERNEL_WINDOWS
/* Advance over the trap instruction. */
@@ -558,10 +555,9 @@ sys_rt_sigreturn:
* XXX code just like on sparc64... -DaveM
*/
.align 4
- .globl sys_fork, flush_patch_two
+ .globl sys_fork
sys_fork:
mov %o7, %l5
-flush_patch_two:
FLUSH_ALL_KERNEL_WINDOWS;
ld [%curptr + TI_TASK], %o4
rd %psr, %g4
@@ -574,10 +570,9 @@ flush_patch_two:
mov %l5, %o7
/* Whee, kernel threads! */
- .globl sys_clone, flush_patch_three
+ .globl sys_clone
sys_clone:
mov %o7, %l5
-flush_patch_three:
FLUSH_ALL_KERNEL_WINDOWS;
ld [%curptr + TI_TASK], %o4
rd %psr, %g4
@@ -590,9 +585,8 @@ flush_patch_three:
mov %l5, %o7
/* Whee, real vfork! */
- .globl sys_vfork, flush_patch_four
+ .globl sys_vfork
sys_vfork:
-flush_patch_four:
FLUSH_ALL_KERNEL_WINDOWS;
ld [%curptr + TI_TASK], %o4
rd %psr, %g4
@@ -909,17 +903,7 @@ breakpoint_trap:
#endif
.align 4
- .globl flush_patch_exception
-flush_patch_exception:
- FLUSH_ALL_KERNEL_WINDOWS;
- ldd [%o0], %o6
- jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h
- mov 1, %g1 ! signal EFAULT condition
-
- .align 4
- .globl kill_user_windows, kuw_patch1_7win
- .globl kuw_patch1
-kuw_patch1_7win: sll %o3, 6, %o3
+ .globl kill_user_windows
/* No matter how much overhead this routine has in the worst
* case scenario, it is several times better than taking the
@@ -939,11 +923,11 @@ kill_user_windows:
be 4f ! yep, we are done
rd %wim, %o3 ! get current wim
srl %o3, 1, %o4 ! simulate a save
-kuw_patch1:
+kuw_next:
sll %o3, 7, %o3 ! compute next wim
or %o4, %o3, %o3 ! result
andncc %o0, %o3, %o0 ! clean this bit in umask
- bne kuw_patch1 ! not done yet
+ bne kuw_next ! not done yet
srl %o3, 1, %o4 ! begin another save simulation
wr %o3, 0x0, %wim ! set the new wim
st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask
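The renamed loop packs a lot into five instructions; here is a C rendering of what it computes, assuming the fixed 8-window case that is all this file supports after the patch. The assembly never masks the rotated value because writes to %wim ignore bits for windows that do not exist; the sketch masks explicitly:

#include <stdint.h>

#define NWINDOWS 8
#define WIM_MASK ((1u << NWINDOWS) - 1)

/* Model of the kuw_next loop: each iteration simulates one save by
 * rotating the invalid-window bit right, then clears that window's
 * bit from the user-window mask; the final rotation is the new %wim.
 */
static uint32_t kill_user_windows_model(uint32_t umask, uint32_t wim)
{
	while (umask) {
		wim = ((wim >> 1) | (wim << (NWINDOWS - 1))) & WIM_MASK;
		umask &= ~wim;
	}
	return wim;
}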
@@ -30,18 +30,6 @@
.text
.align 4
- /* SEVEN WINDOW PATCH INSTRUCTIONS */
- .globl tsetup_7win_patch1, tsetup_7win_patch2
- .globl tsetup_7win_patch3, tsetup_7win_patch4
- .globl tsetup_7win_patch5, tsetup_7win_patch6
-tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
-tsetup_7win_patch2: and %g2, 0x7f, %g2
-tsetup_7win_patch3: and %g2, 0x7f, %g2
-tsetup_7win_patch4: and %g1, 0x7f, %g1
-tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
-tsetup_7win_patch6: and %g2, 0x7f, %g2
- /* END OF PATCH INSTRUCTIONS */
-
/* At trap time, interrupts and all generic traps do the
* following:
*
@@ -72,9 +60,7 @@ tsetup_7win_patch6: and %g2, 0x7f, %g2
* trap pc and npc, and %l3 contains the trap time %wim.
*/
- .globl trap_setup, tsetup_patch1, tsetup_patch2
- .globl tsetup_patch3, tsetup_patch4
- .globl tsetup_patch5, tsetup_patch6
+ .globl trap_setup
trap_setup:
/* Calculate mask of trap window. See if from user
* or kernel and branch conditionally.
@@ -109,11 +95,10 @@ trap_setup_kernel_spill:
* %wim and go.
*/
srl %t_wim, 0x1, %g2 ! begin computation of new %wim
-tsetup_patch1:
- sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
+	sll %t_wim, 0x7, %t_wim
or %t_wim, %g2, %g2
-tsetup_patch2:
- and %g2, 0xff, %g2 ! patched on 7 window Sparcs
+ and %g2, 0xff, %g2
save %g0, %g0, %g0
@@ -185,8 +170,7 @@ trap_setup_from_user:
sub %g2, 0x1, %g2
1:
andn %g2, %t_twinmask, %g2
-tsetup_patch3:
- and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ and %g2, 0xff, %g2
st %g2, [%curptr + TI_UWINMASK] ! store new umask
jmpl %t_retpc + 0x8, %g0 ! return to caller
@@ -199,14 +183,11 @@ trap_setup_user_spill:
* is in %g1 upon entry to here.
*/
-tsetup_patch4:
- and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
+ and %g1, 0xff, %g1
srl %t_wim, 0x1, %g2 ! compute new %wim
-tsetup_patch5:
- sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
+ sll %t_wim, 0x7, %t_wim
or %t_wim, %g2, %g2 ! %g2 is new %wim
-tsetup_patch6:
- and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ and %g2, 0xff, %g2
andn %g1, %g2, %g1 ! clear this bit in %g1
st %g1, [%curptr + TI_UWINMASK]
@@ -404,71 +404,6 @@ leon_init:
wr %g1, 0x0, %wim ! make window 1 invalid
WRITE_PAUSE
- cmp %g3, 0x7
- bne 2f
- nop
-
- /* Adjust our window handling routines to
- * do things correctly on 7 window Sparcs.
- */
-
-#define PATCH_INSN(src, dest) \
- set src, %g5; \
- set dest, %g2; \
- ld [%g5], %g4; \
- st %g4, [%g2];
-
- /* Patch for window spills... */
- PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
- PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
- PATCH_INSN(spnwin_patch3_7win, spnwin_patch3)
-
- /* Patch for window fills... */
- PATCH_INSN(fnwin_patch1_7win, fnwin_patch1)
- PATCH_INSN(fnwin_patch2_7win, fnwin_patch2)
-
- /* Patch for trap entry setup... */
- PATCH_INSN(tsetup_7win_patch1, tsetup_patch1)
- PATCH_INSN(tsetup_7win_patch2, tsetup_patch2)
- PATCH_INSN(tsetup_7win_patch3, tsetup_patch3)
- PATCH_INSN(tsetup_7win_patch4, tsetup_patch4)
- PATCH_INSN(tsetup_7win_patch5, tsetup_patch5)
- PATCH_INSN(tsetup_7win_patch6, tsetup_patch6)
-
- /* Patch for returning from traps... */
- PATCH_INSN(rtrap_7win_patch1, rtrap_patch1)
- PATCH_INSN(rtrap_7win_patch2, rtrap_patch2)
- PATCH_INSN(rtrap_7win_patch3, rtrap_patch3)
- PATCH_INSN(rtrap_7win_patch4, rtrap_patch4)
- PATCH_INSN(rtrap_7win_patch5, rtrap_patch5)
-
- /* Patch for killing user windows from the register file. */
- PATCH_INSN(kuw_patch1_7win, kuw_patch1)
-
- /* Now patch the kernel window flush sequences.
- * This saves 2 traps on every switch and fork.
- */
- set 0x01000000, %g4
- set flush_patch_one, %g5
- st %g4, [%g5 + 0x18]
- st %g4, [%g5 + 0x1c]
- set flush_patch_two, %g5
- st %g4, [%g5 + 0x18]
- st %g4, [%g5 + 0x1c]
- set flush_patch_three, %g5
- st %g4, [%g5 + 0x18]
- st %g4, [%g5 + 0x1c]
- set flush_patch_four, %g5
- st %g4, [%g5 + 0x18]
- st %g4, [%g5 + 0x1c]
- set flush_patch_exception, %g5
- st %g4, [%g5 + 0x18]
- st %g4, [%g5 + 0x1c]
- set flush_patch_switch, %g5
- st %g4, [%g5 + 0x18]
- st %g4, [%g5 + 0x1c]
-
-2:
sethi %hi(nwindows), %g4
st %g3, [%g4 + %lo(nwindows)] ! store final value
sub %g3, 0x1, %g3
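For reference, the deleted boot-time fixups did two things: PATCH_INSN copied a single pre-assembled instruction word over its 8-window counterpart, and the flush_patch_* stores wrote 0x01000000, the encoding of sethi %hi(0), %g0, i.e. nop, over the words at byte offsets 0x18 and 0x1c of each flush sequence (presumably the seventh save and the matching first restore), so that 7-window CPUs ran one fewer save/restore pair. A rough C rendering, not a kernel API:

#include <stdint.h>

#define SPARC_NOP 0x01000000u	/* sethi %hi(0), %g0 */

/* What PATCH_INSN(src, dest) amounted to. */
static void patch_insn(const uint32_t *src, uint32_t *dst)
{
	*dst = *src;
}

/* What each pair of flush_patch_* stores amounted to: nop out the
 * instruction words at byte offsets 0x18 and 0x1c (words 6 and 7).
 */
static void shorten_flush_sequence(uint32_t *seq)
{
	seq[0x18 / 4] = SPARC_NOP;
	seq[0x1c / 4] = SPARC_NOP;
}

This only worked because it ran once during early boot; a general-purpose patcher would also have to flush the instruction cache after the stores.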
@@ -23,15 +23,6 @@
#define glob_tmp g4
#define curptr g6
- /* 7 WINDOW SPARC PATCH INSTRUCTIONS */
- .globl rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
- .globl rtrap_7win_patch4, rtrap_7win_patch5
-rtrap_7win_patch1: srl %t_wim, 0x6, %glob_tmp
-rtrap_7win_patch2: and %glob_tmp, 0x7f, %glob_tmp
-rtrap_7win_patch3: srl %g1, 7, %g2
-rtrap_7win_patch4: srl %g2, 6, %g2
-rtrap_7win_patch5: and %g1, 0x7f, %g1
- /* END OF PATCH INSTRUCTIONS */
/* We need to check for a few things which are:
* 1) The need to call schedule() because this
@@ -117,17 +108,17 @@ ret_trap_nobufwins:
bne ret_trap_userwins_ok
nop
- /* Calculate new %wim, we have to pull a register
- * window from the users stack.
- */
+	/* Calculate the new %wim; we have to pull a register
+	 * window from the user's stack.
+	 */
ret_trap_pull_one_window:
- rd %wim, %t_wim
- sll %t_wim, 0x1, %twin_tmp1
-rtrap_patch1: srl %t_wim, 0x7, %glob_tmp
- or %glob_tmp, %twin_tmp1, %glob_tmp
-rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp
+ rd %wim, %t_wim
+ sll %t_wim, 0x1, %twin_tmp1
+ srl %t_wim, 0x7, %glob_tmp
+ or %glob_tmp, %twin_tmp1, %glob_tmp
+ and %glob_tmp, 0xff, %glob_tmp
- wr %glob_tmp, 0x0, %wim
+ wr %glob_tmp, 0x0, %wim
/* Here comes the architecture specific
* branch to the user stack checking routine
@@ -174,20 +165,20 @@ ret_trap_unaligned_pc:
ld [%curptr + TI_FLAGS], %g2
ret_trap_kernel:
- /* Will the rett land us in the invalid window? */
- mov 2, %g1
- sll %g1, %t_psr, %g1
-rtrap_patch3: srl %g1, 8, %g2
- or %g1, %g2, %g1
- rd %wim, %g2
- andcc %g2, %g1, %g0
- be 1f ! Nope, just return from the trap
- sll %g2, 0x1, %g1
-
- /* We have to grab a window before returning. */
-rtrap_patch4: srl %g2, 7, %g2
- or %g1, %g2, %g1
-rtrap_patch5: and %g1, 0xff, %g1
+ /* Will the rett land us in the invalid window? */
+ mov 2, %g1
+ sll %g1, %t_psr, %g1
+ srl %g1, 8, %g2
+ or %g1, %g2, %g1
+ rd %wim, %g2
+ andcc %g2, %g1, %g0
+ be 1f ! Nope, just return from the trap
+ sll %g2, 0x1, %g1
+
+ /* We have to grab a window before returning. */
+ srl %g2, 7, %g2
+ or %g1, %g2, %g1
+ and %g1, 0xff, %g1
wr %g1, 0x0, %wim
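The now-unpatched ret_trap_kernel arithmetic leans on a neat SPARC detail: shift instructions use only the low five bits of their count, and the CWP lives in PSR[4:0], so shifting by %t_psr directly produces the bit for window CWP + 1, the window rett will step into. A sketch under the same 8-window assumption (rett_hits_invalid_window is an invented name):

#include <stdint.h>

#define NWINDOWS 8
#define WIM_MASK ((1u << NWINDOWS) - 1)

/* rett, like restore, advances the CWP by one, so the landing window
 * is bit (CWP + 1) mod NWINDOWS; if that bit is set in %wim we must
 * pull a window off the stack before returning.
 */
static int rett_hits_invalid_window(uint32_t psr, uint32_t wim)
{
	uint32_t cwp = psr & 0x1f;	/* PSR[4:0] */
	uint32_t bit = ((2u << cwp) | ((2u << cwp) >> NWINDOWS)) & WIM_MASK;

	return (wim & bit) != 0;
}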
@@ -43,18 +43,6 @@
#define twin_tmp l4 /* Temp reg, only usable in trap window T */
#define glob_tmp g5 /* Global temporary reg, usable anywhere G */
- .text
- .align 4
- /* BEGINNING OF PATCH INSTRUCTIONS */
- /* On a 7-window Sparc the boot code patches spnwin_*
- * instructions with the following ones.
- */
- .globl spnwin_patch1_7win, spnwin_patch2_7win, spnwin_patch3_7win
-spnwin_patch1_7win: sll %t_wim, 6, %glob_tmp
-spnwin_patch2_7win: and %glob_tmp, 0x7f, %glob_tmp
-spnwin_patch3_7win: and %twin_tmp, 0x7f, %twin_tmp
- /* END OF PATCH INSTRUCTIONS */
-
/* The trap entry point has done the following:
*
* rd %psr, %l0
@@ -69,7 +57,6 @@ spnwin_patch3_7win: and %twin_tmp, 0x7f, %twin_tmp
* will be all zeroes.
*/
.globl spill_window_entry
- .globl spnwin_patch1, spnwin_patch2, spnwin_patch3
spill_window_entry:
/* LOCATION: Trap Window */
@@ -81,10 +68,10 @@ spill_window_entry:
*
* newwim = ((%wim>>1) | (%wim<<(nwindows - 1)));
*/
- srl %t_wim, 0x1, %twin_tmp
-spnwin_patch1: sll %t_wim, 7, %glob_tmp
- or %glob_tmp, %twin_tmp, %glob_tmp
-spnwin_patch2: and %glob_tmp, 0xff, %glob_tmp
+ srl %t_wim, 0x1, %twin_tmp
+ sll %t_wim, 7, %glob_tmp
+ or %glob_tmp, %twin_tmp, %glob_tmp
+ and %glob_tmp, 0xff, %glob_tmp
/* The trap entry point has set the condition codes
* up for us to see if this is from user or kernel.
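The comment above already states the C form of the spill rotation; with the patch alternatives gone, the 0xff mask is just the fixed nwindows = 8 case. Spelled out as an illustrative function:

#include <stdint.h>

#define NWINDOWS 8

/* Save-direction rotate of the invalid bit:
 * newwim = ((%wim >> 1) | (%wim << (nwindows - 1))) & valid bits.
 */
static uint32_t spill_new_wim(uint32_t wim)
{
	return ((wim >> 1) | (wim << (NWINDOWS - 1))) & ((1u << NWINDOWS) - 1);
}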
@@ -222,17 +209,17 @@ spwin_user_stack_is_bolixed:
mov 1, %twin_tmp
st %twin_tmp, [%curptr + TI_W_SAVED]
- /* Compute new user window mask. What we are basically
- * doing is taking two windows, the invalid one at trap
- * time and the one we attempted to throw onto the users
- * stack, and saying that everything else is an ok user
- * window. umask = ((~(%t_wim | %wim)) & valid_wim_bits)
- */
- rd %wim, %twin_tmp
- or %twin_tmp, %t_wim, %twin_tmp
- not %twin_tmp
-spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp ! patched on 7win Sparcs
- st %twin_tmp, [%curptr + TI_UWINMASK]
+	/* Compute the new user window mask. What we are basically
+	 * doing is taking two windows, the invalid one at trap
+	 * time and the one we attempted to throw onto the user's
+	 * stack, and saying that everything else is an OK user
+	 * window. umask = ((~(%t_wim | %wim)) & valid_wim_bits)
+	 */
+ rd %wim, %twin_tmp
+ or %twin_tmp, %t_wim, %twin_tmp
+ not %twin_tmp
+ and %twin_tmp, 0xff, %twin_tmp
+ st %twin_tmp, [%curptr + TI_UWINMASK]
#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
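The umask formula in the retained comment translates directly to C; valid_wim_bits is now always 0xff. A minimal sketch:

#include <stdint.h>

#define VALID_WIM_BITS 0xffu	/* fixed 8-window case */

/* Every window except the trap-time invalid one and the one we failed
 * to spill remains a usable user window.
 */
static uint32_t compute_umask(uint32_t t_wim, uint32_t wim)
{
	return ~(t_wim | wim) & VALID_WIM_BITS;
}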
@@ -68,27 +68,17 @@
* are done and return from trap if successful
*/
- /* BEGINNING OF PATCH INSTRUCTIONS */
-
- /* On 7-window Sparc the boot code patches fnwin_patch1
- * with the following instruction.
- */
- .globl fnwin_patch1_7win, fnwin_patch2_7win
-fnwin_patch1_7win: srl %t_wim, 6, %twin_tmp2
-fnwin_patch2_7win: and %twin_tmp1, 0x7f, %twin_tmp1
- /* END OF PATCH INSTRUCTIONS */
-
- .globl fill_window_entry, fnwin_patch1, fnwin_patch2
+ .globl fill_window_entry
fill_window_entry:
/* LOCATION: Window 'T' */
/* Compute what the new %wim is going to be if we retrieve
* the proper window off of the stack.
*/
- sll %t_wim, 1, %twin_tmp1
-fnwin_patch1: srl %t_wim, 7, %twin_tmp2
- or %twin_tmp1, %twin_tmp2, %twin_tmp1
-fnwin_patch2: and %twin_tmp1, 0xff, %twin_tmp1
+ sll %t_wim, 1, %twin_tmp1
+ srl %t_wim, 7, %twin_tmp2
+ or %twin_tmp1, %twin_tmp2, %twin_tmp1
+ and %twin_tmp1, 0xff, %twin_tmp1
wr %twin_tmp1, 0x0, %wim /* Make window 'I' invalid */
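Fill is the mirror image of spill: retrieving a window rotates the invalid bit left, in the restore direction, which is exactly what the de-patched sequence computes. For completeness, the illustrative C form:

#include <stdint.h>

#define NWINDOWS 8

/* Restore-direction rotate:
 * newwim = ((%wim << 1) | (%wim >> (nwindows - 1))) & 0xff.
 */
static uint32_t fill_new_wim(uint32_t wim)
{
	return ((wim << 1) | (wim >> (NWINDOWS - 1))) & ((1u << NWINDOWS) - 1);
}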