/*
 *  linux/arch/arm/mm/tlb-v7.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *  Modified for ARMv7 by Catalin Marinas
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARM architecture version 7 TLB handling functions.
 *  These assume a split I/D TLB.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

/*
 *	v7wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vma_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 */
ENTRY(v7wbi_flush_user_tlb_range)
	vma_vm_mm r3, r2			@ r3 = vma->vm_mm
	mmid	r3, r3				@ r3 = vm_mm->context.id
	dsb					@ complete prior page-table writes
	mov	r0, r0, lsr #PAGE_SHIFT		@ round start down to page boundary
	mov	r1, r1, lsr #PAGE_SHIFT		@ round end down likewise
	asid	r3, r3				@ keep only the ASID field
	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ r0 = first MVA combined with ASID
	mov	r1, r1, lsl #PAGE_SHIFT		@ r1 = end MVA (exclusive)
	vma_vm_flags r2, r2			@ r2 = vma->vm_flags
.Lv7wbi_user_next:
#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
#endif
	add	r0, r0, #PAGE_SZ		@ advance MVA to the next page
	cmp	r0, r1
	blo	.Lv7wbi_user_next		@ repeat until the range is covered
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
	dsb					@ wait for invalidations to complete
	mov	pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)

/*
 *	v7wbi_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of kernel TLB entries.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
ENTRY(v7wbi_flush_kern_tlb_range)
	dsb					@ complete prior page-table writes
	mov	r0, r0, lsr #PAGE_SHIFT		@ round start down to page boundary
	mov	r1, r1, lsr #PAGE_SHIFT		@ round end down likewise
	mov	r0, r0, lsl #PAGE_SHIFT		@ r0 = first MVA (no ASID: kernel)
	mov	r1, r1, lsl #PAGE_SHIFT		@ r1 = end MVA (exclusive)
.Lv7wbi_kern_next:
#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
#endif
	add	r0, r0, #PAGE_SZ		@ advance MVA to the next page
	cmp	r0, r1
	blo	.Lv7wbi_kern_next		@ repeat until the range is covered
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
	dsb					@ wait for invalidations to complete
	isb					@ resync instruction stream before return
	mov	pc, lr
ENDPROC(v7wbi_flush_kern_tlb_range)
88 | ||
5db3fe53 | 89 | __INIT |
4e93cb00 MG |
90 | |
91 | .type v7wbi_tlb_fns, #object | |
92 | ENTRY(v7wbi_tlb_fns) | |
93 | .long v7wbi_flush_user_tlb_range | |
94 | .long v7wbi_flush_kern_tlb_range | |
5db3fe53 | 95 | .long v7wbi_tlb_flags |
4e93cb00 | 96 | .size v7wbi_tlb_fns, . - v7wbi_tlb_fns |