@
@ Low-level ARM kernel exception entry/exit helper macros and
@ definitions (assembly header shared by exception entry code).
@
1 | #include <linux/init.h> | |
2 | #include <linux/linkage.h> | |
3 | ||
4 | #include <asm/assembler.h> | |
5 | #include <asm/asm-offsets.h> | |
6 | #include <asm/errno.h> | |
7 | #include <asm/thread_info.h> | |
8 | ||
@ Bad Abort numbers
@ -----------------
@ Indices used to classify an unrecoverable ("bad") exception entry
@ when it is reported; consumers of these values are not visible here.
@
#define BAD_PREFETCH	0	@ bad prefetch abort
#define BAD_DATA	1	@ bad data abort
#define BAD_ADDREXCPTN	2	@ address exception
#define BAD_IRQ		3	@ bad interrupt entry
#define BAD_UNDEFINSTR	4	@ bad undefined-instruction trap
17 | ||
@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
@ S_OFF is the extra distance (in bytes) from sp to the start of the
@ saved pt_regs when those two extra argument words are on the stack.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif
32 | ||
	@
	@ Zero the frame pointer so stack backtraces terminate cleanly
	@ at exception-entry boundaries.  No-op unless frame pointers
	@ are configured in.
	@
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm
38 | ||
	@
	@ Rewrite the CP15 control register from the value cached at
	@ .LCcralign (set up elsewhere in the file).  NOTE(review):
	@ presumably this re-arms the alignment trap (A bit) on return
	@ paths - confirm against the definition of .LCcralign.
	@ Clobbers \rtemp.  No-op unless CONFIG_ALIGNMENT_TRAP.
	@
	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	\rtemp, .LCcralign	@ \rtemp = address of cached value
	ldr	\rtemp, [\rtemp]	@ \rtemp = cached control reg value
	mcr	p15, 0, \rtemp, c1, c0	@ write CP15 c1 (control register)
#endif
	.endm
46 | ||
@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode
@
@ store_user_sp_lr: save user sp at [\rd, #\offset] and user lr at
@ [\rd, #\offset + 4].  Clobbers \rtemp; briefly runs in SYS mode.
@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr			@ current PSR (SVC mode)
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)	@ flip SVC <-> SYS mode bits
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)	@ flip the bits back
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
63 | ||
	@
	@ Counterpart of store_user_sp_lr: reload user sp from
	@ [\rd, #\offset] and user lr from [\rd, #\offset + 4] by
	@ temporarily switching to SYS mode (which shares sp/lr with
	@ user mode).  Clobbers \rtemp; call only from SVC mode.
	@
	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr			@ current PSR (SVC mode)
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)	@ flip SVC <-> SYS mode bits
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)	@ flip the bits back
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
75 | ||
76 | #ifndef CONFIG_THUMB2_KERNEL | |
	@
	@ Return from an SVC-mode exception (ARM-mode kernel): \rpsr is
	@ the PSR to restore; the saved register frame is at sp with r0
	@ in the bottom slot (see the S_R0 check above).
	@
	.macro	svc_exit, rpsr
	msr	spsr_cxsf, \rpsr		@ PSR restored by the ldm {..}^ below
#if defined(CONFIG_CPU_V6)
	ldr	r0, [sp]			@ reload r0 first: the strex below
						@ overwrites the [sp] (r0) slot
	strex	r1, r2, [sp]			@ clear the exclusive monitor
						@ (ARMv6 has no clrex instruction)
	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
						@ (pre-v6: no exclusive monitor)
#endif
	.endm
90 | ||
	@
	@ Restore the saved user registers and return to user space
	@ (ARM-mode kernel).  \offset is the distance from sp to the
	@ saved frame.  When \fast is set, r0 is NOT reloaded -
	@ presumably so it carries a syscall return value back to the
	@ user; confirm with the callers.
	@
	.macro	restore_user_regs, fast = 0, offset = 0
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc (writeback: sp now points
						@ at the S_PC slot of the frame)
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6)
	strex	r1, r2, [sp]			@ clear the exclusive monitor
						@ (ARMv6 has no clrex; the dummy
						@ store hits the already-consumed
						@ S_PC slot, so nothing is lost)
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #S_FRAME_SIZE - S_PC	@ pop the remainder of the frame
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
110 | ||
	@
	@ Derive the current thread_info pointer from sp: the two shifts
	@ clear the low 13 bits, rounding sp down to an 8 KiB boundary.
	@ NOTE(review): assumes 8 KiB kernel stacks with thread_info at
	@ the base - confirm against THREAD_SIZE.
	@
	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13
	mov	\rd, \rd, lsl #13
	.endm
115 | ||
@
@ 32-bit wide "mov pc, reg"
@
	.macro	movw_pc, reg
	mov	pc, \reg		@ ARM encodings are always 32 bits,
					@ so no padding is needed here
	.endm
122 | #else /* CONFIG_THUMB2_KERNEL */ | |
	@
	@ SVC exception return for Thumb-2 kernels, where "ldm {..pc}^"
	@ is not available: build a two-word RFE frame (return address +
	@ \rpsr) just below the original stack pointer, then return with
	@ rfe.  The tst result (flags) survives the loads/stores below,
	@ which do not write the condition flags.
	@
	.macro	svc_exit, rpsr
	clrex					@ clear the exclusive monitor
	ldr	r0, [sp, #S_SP]			@ top of the stack
	ldr	r1, [sp, #S_PC]			@ return address
	tst	r0, #4				@ orig stack 8-byte aligned?
	stmdb	r0, {r1, \rpsr}			@ rfe context (at r0 - 8)
	ldmia	sp, {r0 - r12}
	ldr	lr, [sp, #S_LR]
	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned: sp -> rfe frame
	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned: sp -> rfe frame
	rfeia	sp!				@ restore pc and cpsr from frame
	.endm
135 | ||
	@
	@ Restore the saved user registers and return to user space
	@ (Thumb-2 kernel).  "ldm {..}^" is unavailable, so user sp/lr
	@ are reloaded via load_user_sp_lr (brief SYS-mode switch).
	@ When \fast is set, r0 is NOT reloaded - presumably carrying a
	@ syscall return value; confirm with the callers.
	@
	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	mov	r2, sp				@ r2 = frame base for the helper
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP		@ sp -> S_SP slot of the frame
	msr	spsr_cxsf, r1			@ save in spsr_svc
	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP	@ pop the remainder of the frame
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
152 | ||
	@
	@ Thumb-2 variant of get_thread_info: clear the low 13 bits of
	@ sp (round down to an 8 KiB boundary).  The shift is split into
	@ a separate lsr - presumably because the ARM-style shifted
	@ "mov \rd, sp, lsr #13" form is not encodable here; confirm.
	@ NOTE(review): assumes 8 KiB kernel stacks with thread_info at
	@ the base - confirm against THREAD_SIZE.
	@
	.macro	get_thread_info, rd
	mov	\rd, sp
	lsr	\rd, \rd, #13
	mov	\rd, \rd, lsl #13
	.endm
158 | ||
@
@ 32-bit wide "mov pc, reg"
@
	.macro	movw_pc, reg
	mov	pc, \reg		@ 16-bit Thumb encoding...
	nop				@ ...padded with a nop so the whole
					@ sequence stays 32 bits wide
	.endm
166 | #endif /* !CONFIG_THUMB2_KERNEL */ | |
167 | ||
@
@ Debug exceptions are taken as prefetch or data aborts.
@ We must disable preemption during the handler so that
@ we can access the debug registers safely.
@
	.macro	debug_entry, fsr
#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
	ldr	r4, =0x40f		@ mask out fsr.fs
	and	r5, r4, \fsr		@ r5 = fault-status bits of \fsr
	cmp	r5, #2			@ debug exception (FS == 0b00010)?
	bne	1f			@ not a debug event: nothing to do
	get_thread_info r10		@ r10 = current thread_info
	ldr	r6, [r10, #TI_PREEMPT]	@ get preempt count
	add	r11, r6, #1		@ increment it
	str	r11, [r10, #TI_PREEMPT]	@ NOTE(review): the matching decrement
					@ is not in this macro - it must be
					@ done on the handler's exit path
1:
#endif
	.endm
186 | ||
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0); deliberately
				@ aliases tbl - the two uses never overlap
tsk	.req	r9		@ current thread_info