@ entry-header.S -- common macros for ARM exception entry/exit paths
@ (from the ms2-kexec tree; originally arch/arm/kernel/entry-header.S)
1 #include <linux/init.h>
2 #include <linux/linkage.h>
3
4 #include <asm/assembler.h>
5 #include <asm/asm-offsets.h>
6 #include <asm/errno.h>
7 #include <asm/thread_info.h>
8
@ Bad Abort numbers
@ -----------------
@
@ Small integer codes used to tag the reason for an unrecoverable
@ exception.  NOTE(review): the consumer (presumably a "bad mode"
@ reporting routine) is not in this file -- confirm against the
@ entry-*.S files that include this header.
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4
17
@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
@ S_OFF is the byte offset from the saved-args area back to the
@ pt_regs frame proper (2 extra words = 8 bytes).
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 *
 * S_R0 comes from asm-offsets (see the asm/asm-offsets.h include above).
 * The ldmdb/ldmia restore sequences below assume the saved r0 sits at
 * offset 0 of the frame, so fail the build loudly if that ever changes.
 */
#if S_R0 != 0
#error "Please fix"
#endif
32
@ zero_fp: clear the frame pointer on kernel entry.  A zero fp acts as
@ the conventional end-of-backtrace marker, so unwinds started from an
@ exception frame terminate here.  Assembles to nothing when frame
@ pointers are not configured.
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0			@ fp = 0: backtrace terminator
#endif
	.endm
38
@ alignment_trap: rewrite the CP15 control register (c1/c0, the system
@ control register) from a value cached in memory, restoring the
@ kernel's alignment-trap configuration.  Clobbers \rtemp.
@ NOTE(review): .LCcralign is defined elsewhere (not in this chunk); it
@ is assumed to hold the address of the saved control-register value --
@ confirm in the including entry-*.S file.
	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	\rtemp, .LCcralign	@ \rtemp = address of cached control reg value
	ldr	\rtemp, [\rtemp]	@ \rtemp = cached value itself
	mcr	p15, 0, \rtemp, c1, c0	@ write CP15 c1 (system control register)
#endif
	.endm
46
@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode
@
@ store_user_sp_lr: save the user-mode sp and lr to [\rd, #\offset]
@ and [\rd, #\offset + 4].  Works because SYS mode banks the same
@ sp/lr as USER mode.  Clobbers \rtemp; temporarily leaves SVC mode,
@ so interrupts must be masked by the caller's context.
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)	@ flip SVC<->SYS mode bits
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)	@ flip the bits back
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
63
@ load_user_sp_lr: counterpart of store_user_sp_lr -- reload the
@ user-mode sp and lr from [\rd, #\offset] and [\rd, #\offset + 4]
@ via a round trip through SYS mode (which shares sp/lr with USER
@ mode).  Clobbers \rtemp; must be called from SVC mode.
	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)	@ flip SVC<->SYS mode bits
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)	@ flip the bits back
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
75
76 #ifndef CONFIG_THUMB2_KERNEL
@ svc_exit: return from an exception taken in SVC mode (ARM encoding).
@ \rpsr holds the PSR to restore; the pt_regs frame sits at sp.
@ Loading a register list that includes pc with the ^ qualifier also
@ copies SPSR into CPSR, completing the exception return in one step.
	.macro	svc_exit, rpsr
	msr	spsr_cxsf, \rpsr		@ stage return PSR in spsr_svc
#if defined(CONFIG_CPU_32v6K)
	clrex				@ clear the exclusive monitor
	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
#elif defined (CONFIG_CPU_V6)
	@ Plain ARMv6 has no CLREX: a dummy STREX to the stack clears the
	@ local monitor instead.  It may overwrite [sp] (the saved r0),
	@ so r0 is reloaded first and the ldm below starts at r1.
	ldr	r0, [sp]
	strex	r1, r2, [sp]		@ clear the exclusive monitor
	ldmib	sp, {r1 - pc}^		@ load r1 - pc, cpsr
#else
	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
#endif
	.endm
90
@ restore_user_regs: return to user space from the frame at
@ sp + \offset (ARM encoding).
@   \fast = 1: skip reloading r0 -- presumably it already holds the
@              syscall return value (see the "slow/fast restore user
@              regs" note above); only r1-lr are restored.
@ The ^ on the ldm loads the USER-banked r13/r14; "movs pc, lr" then
@ copies spsr_svc into cpsr, resuming the interrupted user context.
	.macro	restore_user_regs, fast = 0, offset = 0
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc (writeback: sp now -> S_PC slot)
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
#elif defined (CONFIG_CPU_V6)
	@ Pre-V6K monitor clear: dummy STREX scribbles on the S_PC slot,
	@ which is safe -- its value is already in lr.
	strex	r1, r2, [sp]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_PC	@ pop the remainder of the frame
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
108
@ get_thread_info: \rd = sp with the low 13 bits cleared, i.e. sp
@ rounded down to an 8 KiB boundary -- the base of the kernel stack,
@ where struct thread_info lives (assumes 8 KiB stacks; the #13 shift
@ encodes that).
	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13
	mov	\rd, \rd, lsl #13		@ \rd = sp & ~0x1fff
	.endm
113
@
@ 32-bit wide "mov pc, reg"
@
@ In ARM state every instruction is already 4 bytes, so a bare mov
@ satisfies the "32-bit wide" size contract on its own (the Thumb-2
@ build pads instead -- see the other half of this #if).
	.macro	movw_pc, reg
	mov	pc, \reg
	.endm
120 #else /* CONFIG_THUMB2_KERNEL */
@ svc_exit (Thumb-2): return from an exception taken in SVC mode.
@ Thumb-2 cannot encode "ldm {... pc}^", so instead an RFE context
@ {return pc, \rpsr} is stored just below the interrupted stack
@ pointer and consumed with "rfeia sp!", which restores pc and cpsr
@ together.  The eq/ne pair picks the sp adjustment that lands sp
@ exactly on that context, whichever way the original stack was
@ aligned (the frame start shifts by 4 when the interrupted sp was
@ not 8-byte aligned).
	.macro	svc_exit, rpsr
	clrex					@ clear the exclusive monitor
	ldr	r0, [sp, #S_SP]			@ top of the stack (interrupted sp)
	ldr	r1, [sp, #S_PC]			@ return address
	tst	r0, #4				@ orig stack 8-byte aligned?
	stmdb	r0, {r1, \rpsr}			@ rfe context at [r0 - 8]
	ldmia	sp, {r0 - r12}			@ restore r0-r12 from pt_regs
	ldr	lr, [sp, #S_LR]			@ restore svc lr
	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned: sp -> rfe context
	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned: frame starts 4 lower
	rfeia	sp!				@ pop {pc, cpsr} and return
	.endm
133
@ restore_user_regs (Thumb-2): return to user space from the frame at
@ sp + \offset.  Thumb-2 lacks the "ldm {sp, lr}^" user-register form,
@ so the user sp/lr are installed via load_user_sp_lr (SYS-mode
@ round trip) and only r0-r12 come from the ldm.
@   \fast = 1: leave r0 untouched -- presumably it already carries the
@              syscall return value (see the "slow/fast" note above).
	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	mov	r2, sp				@ r2 = frame base (sp itself changes below)
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP		@ sp -> S_SP slot
	msr	spsr_cxsf, r1			@ save in spsr_svc
	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP	@ pop the remainder of the frame
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
150
@ get_thread_info (Thumb-2): same computation as the ARM version --
@ \rd = sp & ~0x1fff, the base of the (assumed 8 KiB) kernel stack
@ where struct thread_info lives.  The shift is a separate instruction
@ because Thumb-2 has no shifted-operand form of this mov.
	.macro	get_thread_info, rd
	mov	\rd, sp
	lsr	\rd, \rd, #13
	mov	\rd, \rd, lsl #13		@ \rd = sp & ~0x1fff
	.endm
156
@
@ 32-bit wide "mov pc, reg"
@
@ In Thumb-2 "mov pc, reg" is a 16-bit encoding; the trailing nop pads
@ the macro out so it occupies a full 32 bits, keeping its size equal
@ to the ARM version (needed where callers rely on a fixed stride --
@ e.g. computed jumps).
	.macro	movw_pc, reg
	mov	pc, \reg
	nop
	.endm
164 #endif /* !CONFIG_THUMB2_KERNEL */
165
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional: both alias r8, so the table
 * pointer and the "is this a Linux syscall" flag share a register and
 * are never live at the same time.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info
@ (end of file; gitweb page footer removed)