/*
 *  linux/arch/arm/mm/tlb-v7.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *  Modified for ARMv7 by Catalin Marinas
 *  This edition is maintained by Matthew Veety (aliasxerog) <mveety@gmail.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  ARM architecture version 7 TLB handling functions.
 *  These assume a split I/D TLB.
 */
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

/*
 *	v7wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vma_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 */
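/*
 * The loop below works on MVAs that encode both the page-aligned
 * virtual address and the ASID: the address is shifted down and back
 * up by PAGE_SHIFT to clear the in-page bits, then OR-ed with the
 * masked context ID, i.e. roughly mva = (addr & PAGE_MASK) | asid.
 */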
ENTRY(v7wbi_flush_user_tlb_range)
        vma_vm_mm r3, r2                        @ get vma->vm_mm
        mmid    r3, r3                          @ get vm_mm->context.id
        dsb
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        asid    r3, r3                          @ mask ASID
        orr     r0, r3, r0, lsl #PAGE_SHIFT     @ Create initial MVA
        mov     r1, r1, lsl #PAGE_SHIFT
        vma_vm_flags r2, r2                     @ get vma->vm_flags
1:
        mcr     p15, 0, r0, c8, c6, 1           @ TLB invalidate D MVA (was 1)
        tst     r2, #VM_EXEC                    @ Executable area ?
        mcrne   p15, 0, r0, c8, c5, 1           @ TLB invalidate I MVA (was 1)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
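        @ The branch predictor may hold stale entries for the pages just
        @ invalidated, so the BTAC/BTB is flushed before returning.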
        mov     ip, #0
        mcr     p15, 0, ip, c7, c5, 6           @ flush BTAC/BTB
        dsb
        mov     pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)

/*
 *	v7wbi_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of kernel TLB entries
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
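/*
 * Unlike the user variant above, there is no vma here, so no VM_EXEC
 * check is possible and the I-TLB entry is invalidated unconditionally
 * for every page in the range. The shift pairs below are equivalent to
 * masking with PAGE_MASK: addr = (addr >> PAGE_SHIFT) << PAGE_SHIFT.
 */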
ENTRY(v7wbi_flush_kern_tlb_range)
        dsb
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        mov     r0, r0, lsl #PAGE_SHIFT
        mov     r1, r1, lsl #PAGE_SHIFT
1:
        mcr     p15, 0, r0, c8, c6, 1           @ TLB invalidate D MVA
        mcr     p15, 0, r0, c8, c5, 1           @ TLB invalidate I MVA
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
        mov     r2, #0
        mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
        dsb
        isb
        mov     pc, lr
ENDPROC(v7wbi_flush_kern_tlb_range)

        .section ".text.init", #alloc, #execinstr

        .type   v7wbi_tlb_fns, #object
ENTRY(v7wbi_tlb_fns)
        .long   v7wbi_flush_user_tlb_range
        .long   v7wbi_flush_kern_tlb_range
        .long   v6wbi_tlb_flags
        .size   v7wbi_tlb_fns, . - v7wbi_tlb_fns
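
/*
 * The three words in v7wbi_tlb_fns are consumed from C as a table of
 * function pointers plus a flags word. A minimal sketch of the layout
 * this table is expected to match, assuming the conventional
 * asm/tlbflush.h definition (an assumption, not verified against this
 * tree):
 *
 *	struct cpu_tlb_fns {
 *		void (*flush_user_range)(unsigned long start, unsigned long end,
 *					 struct vm_area_struct *vma);
 *		void (*flush_kern_range)(unsigned long start, unsigned long end);
 *		unsigned long tlb_flags;
 *	};
 *
 * Note that the flags entry reuses v6wbi_tlb_flags in this edition.
 */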