/* asm-generic/tlb.h
 *
 *      Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/config.h>
#include <linux/swap.h>
#include <asm/tlbflush.h>

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
  #define FREE_PTE_NR   506
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR   1
  #define tlb_fast_mode(tlb) 1
#endif
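
/*
 * In fast mode (UP, or only one CPU online) no other CPU can be caching a
 * stale translation, so tlb_remove_page() below may free pages immediately
 * instead of batching them until after the TLB flush.
 */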

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.  This structure
 * can be per-CPU or per-MM as the page table lock is held for the duration of
 * TLB shootdown.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            nr;     /* set to ~0U means fast mode */
        unsigned int            need_flush; /* Really unmapped some ptes? */
        unsigned int            fullmm; /* non-zero means full mm flush */
        unsigned long           freed;
        struct page *           pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
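
/*
 * Typical call sequence (illustrative sketch only, modeled on the
 * mm/memory.c callers this code was lifted from; the page-table walk is
 * elided):
 *
 *      struct mmu_gather *tlb;
 *
 *      spin_lock(&mm->page_table_lock);
 *      tlb = tlb_gather_mmu(mm, 0);
 *      ... walk the page tables, calling tlb_remove_tlb_entry() and
 *          tlb_remove_page() for each pte/page being torn down ...
 *      tlb_finish_mmu(tlb, start, end);
 *      spin_unlock(&mm->page_table_lock);
 */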

/* tlb_gather_mmu
 *      Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());

        tlb->mm = mm;

        /* Use fast mode if only one CPU is online */
        tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

        tlb->fullmm = full_mm_flush;
        tlb->freed = 0;

        return tlb;
}
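
/* tlb_flush_mmu
 *      Perform the pending TLB invalidate via the architecture's tlb_flush()
 *      and, when batching (SMP slow mode), free the pages gathered so far.
 *      Does nothing if no ptes were actually unmapped since the last flush.
 */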
static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (!tlb->need_flush)
                return;
        tlb->need_flush = 0;
        tlb_flush(tlb);
        if (!tlb_fast_mode(tlb)) {
                free_pages_and_swap_cache(tlb->pages, tlb->nr);
                tlb->nr = 0;
        }
}

/* tlb_finish_mmu
 *      Called at the end of the shootdown operation to free up any resources
 *      that were required.  The page table lock is still held at this point.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        int freed = tlb->freed;
        struct mm_struct *mm = tlb->mm;
        int rss = mm->rss;

        if (rss < freed)
                freed = rss;
        mm->rss = rss - freed;
        tlb_flush_mmu(tlb, start, end);

        /* keep the page table cache within bounds */
        check_pgt_cache();
}
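
/*
 * Note: tlb->freed is a count of ptes freed, maintained by the caller's
 * zapping loop; it is credited back against mm->rss above, clamped so
 * rss never goes negative.
 */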

/* tlb_remove_page
 *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
 *      handling the additional races in SMP caused by other CPUs caching valid
 *      mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;
        if (tlb_fast_mode(tlb)) {
                free_page_and_swap_cache(page);
                return;
        }
        tlb->pages[tlb->nr++] = page;
        if (tlb->nr >= FREE_PTE_NR)
                tlb_flush_mmu(tlb, 0, 0);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.   This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)                \
        do {                                                    \
                tlb->need_flush = 1;                            \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
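
/*
 * Illustrative sketch of the pte-level pattern (not part of this header;
 * loosely follows the pte zap loop in mm/memory.c):
 *
 *      pte_t ptent = ptep_get_and_clear(ptep);
 *      tlb_remove_tlb_entry(tlb, ptep, address);
 *      if (pte_present(ptent))
 *              tlb_remove_page(tlb, pte_page(ptent));
 */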

#define pte_free_tlb(tlb, ptep)                                 \
        do {                                                    \
                tlb->need_flush = 1;                            \
                __pte_free_tlb(tlb, ptep);                      \
        } while (0)

#define pmd_free_tlb(tlb, pmdp)                                 \
        do {                                                    \
                tlb->need_flush = 1;                            \
                __pmd_free_tlb(tlb, pmdp);                      \
        } while (0)

#endif /* _ASM_GENERIC__TLB_H */