/* entry.s */
/* (code-viewer chrome, translated): Font size: */
/*------------------------------------------------------------------------------
 * Native PARISC/Linux Project (http://www.puffingroup.com/parisc)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/#include <linux/config.h>#include <asm/offset.h>/* the following is the setup i think we should follow: * whenever the CPU is interruptible, the following has to be true: * CR30 is the kernel sp or 0 if we currently use the kernel stack * CR31 is the kernel gp */ /* we have the following possibilities to act on an interruption: * - handle in assembly and use shadowed registers only * - save registers to kernel stack and handle in assembly or C */ .text#ifdef __LP64__ .level 2.0w#endif#define __ASSEMBLY__#include <asm/assembly.h> /* for LDREG/STREG defines */#include <asm/pgtable.h>#include <asm/psw.h>#include <asm/signal.h>#ifdef __LP64__#define FRAME_SIZE 64#else#define FRAME_SIZE 64#endif /* Switch to virtual mapping, trashing only %r1 */ .macro virt_map rfi_type mtsm %r0 tovirt %r29 tovirt %r30 mfsp %sr7, %r1 mtsp %r1, %sr3 mtsp %r0, %sr4 mtsp %r0, %sr5 mtsp %r0, %sr6 mtsp %r0, %sr7 ldil L%KERNEL_PSW, %r1 ldo R%KERNEL_PSW(%r1), %r1 LDIL_FIXUP(%r1) mtctl %r1, %cr22 mtctl %r0, %cr17 mtctl %r0, %cr17 ldil L%.+28, %r1 ldo R%.+24(%r1), %r1 LDIL_FIXUP(%r1) mtctl %r1, %cr18 ldo 4(%r1), %r1 mtctl %r1, %cr18 \rfi_type nop .endm .macro get_stack mfctl %cr30, %r1 comib,=,n 0, %r1, 0f /* forward so predicted not taken */ /* we save the registers in the task struct */ ldo TASK_REGS(%r1), %r29 tophys %r29 STREG %r30, PT_GR30(%r29) STREG %r1, PT_CR30(%r29) ldo TASK_SZ_ALGN(%r1), %r30 b 1f /* unconditional so predicted taken */ mtctl %r0,%cr300: /* we put a struct pt_regs on the stack and save the registers there */ copy %r30,%r29 ldo PT_SZ_ALGN(%r30),%r30 tophys %r29 STREG %r30,PT_GR30(%r29) STREG %r0,PT_CR30(%r29)1: .endm .macro rest_stack regs LDREG PT_CR30(\regs), %r1 comib,=,n 0, %r1, 2f/* forward so predicted not taken */ /* we restore the registers out of the task struct */ mtctl %r1, %cr30 LDREG PT_GR1(\regs), %r1 LDREG PT_GR30(\regs),%r30 b 3f LDREG PT_GR29(\regs),%r292: /* we take a struct pt_regs off the stack */ LDREG PT_GR1(\regs), %r1 LDREG PT_GR29(\regs), %r29 
ldo -PT_SZ_ALGN(%r30), %r303: .endm#ifdef OLD /* fixme interruption handler */ .macro def code /* WARNING!!! THIS IS DEBUG CODE ONLY!!! */ b unimplemented_64bitirq ldi \code, %r1 .align 32 .endm /* Use def to enable break - KWDB wants em * (calls traps.c:handle_interruption) */ .macro pass_break code#else /* default interruption handler * (calls traps.c:handle_interruption) */ .macro def code#endif mtctl %r29, %cr31 mtctl %r1, %cr28 ldi \code, %r1 b intr_save mtctl %r1, %cr29 .align 32 .endm /* Interrupt interruption handler * (calls irq.c:do_irq_mask) */ .macro extint code mtctl %r29, %cr31 mtctl %r1, %cr28 mfctl %cr23, %r1 mtctl %r1, %cr23 b intr_extint mtctl %r1, %cr29 .align 32 .endm .import os_hpmc, code /* HPMC handler */ .macro hpmc code nop /* must be a NOP, will be patched later */ ldil L%PA(os_hpmc), %r3 ldo R%PA(os_hpmc)(%r3), %r3 bv,n 0(%r3) nop .word 0 /* checksum (will be patched) */ .word PA(os_hpmc) /* address of handler */ .word 0 /* length of handler */ .endm /* * Performance Note: Instructions will be moved up into * this part of the code later on, once we are sure * that the tlb miss handlers are close to final form. */ /* Register definitions for tlb miss handler macros */ va = r8 /* virtual address for which the trap occured */ spc = r24 /* space for which the trap occured */#ifndef __LP64__ /* * itlb miss interruption handler (parisc 1.1 - 32 bit) */ .macro itlb_11 code mfctl %pcsq, spc b itlb_miss_11 mfctl %pcoq, va .align 32 .endm#endif /* * itlb miss interruption handler (parisc 2.0) */ .macro itlb_20 code mfctl %pcsq, spc#ifdef __LP64__ b itlb_miss_20w#else b itlb_miss_20#endif mfctl %pcoq, va .align 32 .endm #ifndef __LP64__ /* * naitlb miss interruption handler (parisc 1.1 - 32 bit) * * Note: naitlb misses will be treated * as an ordinary itlb miss for now. * However, note that naitlb misses * have the faulting address in the * IOR/ISR. 
*/ .macro naitlb_11 code mfctl %isr,spc b itlb_miss_11 mfctl %ior,va /* FIXME: If user causes a naitlb miss, the priv level may not be in * lower bits of va, where the itlb miss handler is expecting them */ .align 32 .endm#endif /* * naitlb miss interruption handler (parisc 2.0) * * Note: naitlb misses will be treated * as an ordinary itlb miss for now. * However, note that naitlb misses * have the faulting address in the * IOR/ISR. */ .macro naitlb_20 code mfctl %isr,spc#ifdef __LP64__ b itlb_miss_20w#else b itlb_miss_20#endif mfctl %ior,va /* FIXME: If user causes a naitlb miss, the priv level may not be in * lower bits of va, where the itlb miss handler is expecting them */ .align 32 .endm #ifndef __LP64__ /* * dtlb miss interruption handler (parisc 1.1 - 32 bit) */ .macro dtlb_11 code mfctl %isr, spc b dtlb_miss_11 mfctl %ior, va .align 32 .endm#endif /* * dtlb miss interruption handler (parisc 2.0) */ .macro dtlb_20 code mfctl %isr, spc#ifdef __LP64__ b dtlb_miss_20w#else b dtlb_miss_20#endif mfctl %ior, va .align 32 .endm #ifndef __LP64__ /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) * * Note: nadtlb misses will be treated * as an ordinary dtlb miss for now. * */ .macro nadtlb_11 code mfctl %isr,spc b dtlb_miss_11 mfctl %ior,va .align 32 .endm#endif /* nadtlb miss interruption handler (parisc 2.0) * * Note: nadtlb misses will be treated * as an ordinary dtlb miss for now. 
* */ .macro nadtlb_20 code mfctl %isr,spc#ifdef __LP64__ b dtlb_miss_20w#else b dtlb_miss_20#endif mfctl %ior,va .align 32 .endm #ifndef __LP64__ /* * dirty bit trap interruption handler (parisc 1.1 - 32 bit) */ .macro dbit_11 code mfctl %isr,spc b dbit_trap_11 mfctl %ior,va .align 32 .endm#endif /* * dirty bit trap interruption handler (parisc 2.0) */ .macro dbit_20 code mfctl %isr,spc#ifdef __LP64__ b dbit_trap_20w#else b dbit_trap_20#endif mfctl %ior,va .align 32 .endm /* * Align fault_vector_20 on 4K boundary so that both * fault_vector_11 and fault_vector_20 are on the * same page. This is only necessary as long as we * write protect the kernel text, which we may stop * doing once we use large parge translations to cover * the static part of the kernel address space. */ .export fault_vector_20 .align 4096fault_vector_20: /* First vector is invalid (0) */ .ascii "cows can fly" .byte 0 .align 32 hpmc 1 def 2 def 3 extint 4 def 5 itlb_20 6 def 7 def 8 def 9 def 10 def 11 def 12 def 13 def 14 dtlb_20 15 naitlb_20 16 nadtlb_20 17 def 18 def 19 dbit_20 20 def 21 def 22 def 23 def 24 def 25 def 26 def 27 def 28 def 29 def 30 def 31#ifndef __LP64__ .export fault_vector_11 .align 2048fault_vector_11: /* First vector is invalid (0) */ .ascii "cows can fly" .byte 0 .align 32 hpmc 1 def 2 def 3 extint 4 def 5 itlb_11 6 def 7 def 8 def 9 def 10 def 11 def 12 def 13 def 14 dtlb_11 15 naitlb_11 16 nadtlb_11 17 def 18 def 19 dbit_11 20 def 21 def 22 def 23 def 24 def 25 def 26 def 27 def 28 def 29 def 30 def 31#endif .import handle_interruption,code .import handle_real_interruption,code .import do_irq_mask,code .import parisc_stopkernel,code .import cpu_irq_region,data /* * r26 = function to be called * r25 = argument to pass in * r24 = flags for do_fork() * * Kernel threads don't ever return, so they don't need * a true register context. We just save away the arguments * for copy_thread/ret_ to properly set up the child. 
*/#define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */ .export __kernel_thread, code .import do_fork__kernel_thread: STREG %r2, -RP_OFFSET(%r30) copy %r30, %r1 ldo PT_SZ_ALGN(%r30),%r30#ifdef __LP64__ /* Yo, function pointers in wide mode are little structs... -PB */ /* XXX FIXME do we need to honor the fptr's %dp value too? */ ldd 16(%r26), %r26#endif STREG %r26, PT_GR26(%r1) /* Store function & argument for child */ STREG %r25, PT_GR25(%r1) ldo CLONE_VM(%r0), %r26 /* Force CLONE_VM since only init_mm */ or %r26, %r24, %r26 /* will have kernel mappings. */ copy %r0, %r25 bl do_fork, %r2 copy %r1, %r24 /* Parent Returns here */ ldo -PT_SZ_ALGN(%r30), %r30 LDREG -RP_OFFSET(%r30), %r2 bv %r0(%r2) nop /* * Child Returns here * * copy_thread moved args from temp save area set up above * into task save area. */ .export ret_from_kernel_threadret_from_kernel_thread: LDREG TASK_PT_GR26-TASK_SZ_ALGN(%r30), %r1 LDREG TASK_PT_GR25-TASK_SZ_ALGN(%r30), %r26 ble 0(%sr7, %r1) copy %r31, %r2 b sys_exit ldi 0, %r26 .import sys_execve, code .export __execve, code__execve: copy %r2, %r15 copy %r23, %r17 copy %r30, %r16 ldo PT_SZ_ALGN(%r30), %r30 STREG %r26, PT_GR26(%r16) STREG %r25, PT_GR25(%r16) STREG %r24, PT_GR24(%r16) bl sys_execve, %r2 copy %r16, %r26 comib,<>,n 0,%r28,__execve_failed b intr_return STREG %r17, PT_CR30(%r16)__execve_failed: /* yes, this will trap and die. 
*/ copy %r15, %r2 bv %r0(%r2) nop .align 4 /* * struct task_struct *_switch_to(struct task_struct *prev, * struct task_struct *next) * * switch kernel stacks and return prev */ .export _switch_to, code_switch_to: STREG %r2, -RP_OFFSET(%r30) callee_save ldil L%_switch_to_ret, %r2 ldo R%_switch_to_ret(%r2), %r2 LDIL_FIXUP(%r2) STREG %r2, TASK_PT_KPC(%r26) LDREG TASK_PT_KPC(%r25), %r2 STREG %r30, TASK_PT_KSP(%r26) LDREG TASK_PT_KSP(%r25), %r30 bv %r0(%r2) nop_switch_to_ret: mtctl %r0, %cr0 /* Needed for single stepping */ callee_rest LDREG -RP_OFFSET(%r30), %r2 bv %r0(%r2) copy %r26, %r28 /* * Common rfi return path for interruptions, kernel execve, and some * syscalls. The sys_rt_sigreturn syscall will return via this path * if the signal was received when the process was running; if the * process was blocked on a syscall then the normal syscall_exit * path is used. All syscalls for traced proceses exit via * intr_restore. * Note that the following code uses a "relied upon translation". See * the parisc ACD for details. The ssm is necessary due to a PCXT bug. */ .align 4096 .export syscall_exit_rfisyscall_exit_rfi: copy %r30,%r16 /* FIXME! depi below has hardcoded dependency on kernel stack size */ depi 0,31,14,%r16 /* get task pointer */ ldo TASK_REGS(%r16),%r16 /* Force iaoq to userspace, as the user has had access to our current * context via sigcontext. * XXX do we need any other protection here? */ LDREG PT_IAOQ0(%r16),%r19 depi 3,31,2,%r19 STREG %r19,PT_IAOQ0(%r16) LDREG PT_IAOQ1(%r16),%r19 depi 3,31,2,%r19 STREG %r19,PT_IAOQ1(%r16) intr_return: /* Check for software interrupts */ .import irq_stat,data
/*
 * (code-viewer chrome, translated from Chinese — not part of the source)
 * Shortcut key help:
 *   Copy code:          Ctrl + C
 *   Search code:        Ctrl + F
 *   Full-screen mode:   F11
 *   Toggle theme:       Ctrl + Shift + D
 *   Show shortcuts:     ?
 *   Increase font size: Ctrl + =
 *   Decrease font size: Ctrl + -
 */