#ifndef _GENERIC_RMAP_H
#define _GENERIC_RMAP_H
/*
 * linux/include/asm-generic/rmap.h
 *
 * Architecture-dependent parts of the reverse mapping code;
 * this version should work for most architectures with a
 * 'normal' page table layout.
 *
 * We use the struct page of the page table page to find out
 * the process and full address of a page table entry:
 * - page->mapping points to the process' mm_struct
 * - page->index has the high bits of the address
 * - the lower bits of the address are calculated from the
 *   offset of the page table entry within the page table page
 *
 * For CONFIG_HIGHPTE, we need to represent the address of a pte in a
 * scalar pte_addr_t.  The pfn of the pte's page is shifted left by
 * PAGE_SHIFT bits and is then ORed with the byte offset of the pte
 * within its page.
 *
 * For CONFIG_HIGHMEM4G, the pte_addr_t is 32 bits: 20 for the pfn, 12 for
 * the offset.
 *
 * For CONFIG_HIGHMEM64G, the pte_addr_t is 64 bits: 52 for the pfn, 12 for
 * the offset.
 */
#include <linux/mm.h>
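
/*
 * For example, assuming 4 KB pages (PAGE_SHIFT == 12, as on i386), a pte
 * stored at byte offset 0x0a8 of a page table page whose pfn is 0x12345
 * is represented by the scalar
 *
 *     (0x12345 << 12) | 0x0a8  ==  0x123450a8
 *
 * The high bits select the page frame holding the pte and the low
 * PAGE_SHIFT bits locate the pte within that frame; ptep_to_paddr()
 * below builds exactly this value when CONFIG_HIGHPTE is enabled.
 */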

/*
 * pgtable_add_rmap() records in the page table page's struct page which
 * mm and which virtual address range this page table maps.
 */
static inline void pgtable_add_rmap(struct page * page, struct mm_struct * mm, unsigned long address)
{
#ifdef BROKEN_PPC_PTE_ALLOC_ONE
        /* OK, so PPC calls pte_alloc() before mem_map[] is setup ... ;( */
        extern int mem_init_done;

        if (!mem_init_done)
                return;
#endif
        page->mapping = (void *)mm;
        page->index = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1);
        inc_page_state(nr_page_table_pages);
}

/*
 * pgtable_remove_rmap() clears that information again when the page
 * table page is freed.
 */
static inline void pgtable_remove_rmap(struct page * page)
{
        page->mapping = NULL;
        page->index = 0;
        dec_page_state(nr_page_table_pages);
}

/*
 * ptep_to_mm() returns the mm_struct a page table entry belongs to,
 * using the mapping recorded by pgtable_add_rmap().
 */
static inline struct mm_struct * ptep_to_mm(pte_t * ptep)
{
        struct page * page = kmap_atomic_to_page(ptep);
        return (struct mm_struct *) page->mapping;
}

/*
 * ptep_to_address() reconstructs the virtual address mapped by a page
 * table entry: the high bits come from page->index, the low bits from
 * the pte's offset within its page table page.
 */
static inline unsigned long ptep_to_address(pte_t * ptep)
{
        struct page * page = kmap_atomic_to_page(ptep);
        unsigned long low_bits;
        low_bits = ((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE;
        return page->index + low_bits;
}
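
/*
 * The multiplication in ptep_to_address() works because, for the
 * one-page page tables this header assumes, PTRS_PER_PTE * sizeof(pte_t)
 * == PAGE_SIZE: a pte at byte offset 'off' is entry off / sizeof(pte_t),
 * which maps the virtual page off * PTRS_PER_PTE bytes past page->index.
 * For example, on i386 without PAE (4-byte ptes, PTRS_PER_PTE == 1024),
 * the pte at byte offset 0x18 is entry 6 and low_bits == 0x18 * 1024 ==
 * 0x6000 == 6 * PAGE_SIZE.
 */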

#ifdef CONFIG_HIGHPTE
static inline pte_addr_t ptep_to_paddr(pte_t *ptep)
{
        pte_addr_t paddr;
        paddr = ((pte_addr_t)page_to_pfn(kmap_atomic_to_page(ptep))) << PAGE_SHIFT;
        return paddr + (pte_addr_t)((unsigned long)ptep & ~PAGE_MASK);
}
#else
static inline pte_addr_t ptep_to_paddr(pte_t *ptep)
{
        return (pte_addr_t)ptep;
}
#endif

/*
 * Without CONFIG_HIGHPTE the page table pages always live in lowmem, so
 * a pte_addr_t is simply the kernel-virtual address of the pte and no
 * mapping or unmapping is needed.
 */
#ifndef CONFIG_HIGHPTE
static inline pte_t *rmap_ptep_map(pte_addr_t pte_paddr)
{
        return (pte_t *)pte_paddr;
}

static inline void rmap_ptep_unmap(pte_t *pte)
{
        return;
}
#endif
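
/*
 * When CONFIG_HIGHPTE is enabled, the architecture's own <asm/rmap.h>
 * has to supply rmap_ptep_map()/rmap_ptep_unmap() instead, temporarily
 * mapping the (possibly highmem) page that holds the pte.  Roughly, an
 * i386-style implementation looks like the sketch below, where KM_PTE2
 * stands for whatever atomic kmap slot the architecture reserves for
 * this purpose:
 *
 *      static inline pte_t *rmap_ptep_map(pte_addr_t pte_paddr)
 *      {
 *              unsigned long pfn = (unsigned long)(pte_paddr >> PAGE_SHIFT);
 *              unsigned long off = (unsigned long)pte_paddr & ~PAGE_MASK;
 *              return (pte_t *)((char *)kmap_atomic(pfn_to_page(pfn), KM_PTE2) + off);
 *      }
 *
 *      static inline void rmap_ptep_unmap(pte_t *pte)
 *      {
 *              kunmap_atomic(pte, KM_PTE2);
 *      }
 */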

#endif /* _GENERIC_RMAP_H */