/*
 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
 *
 */

#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <asm/smp.h>

#ifdef CONFIG_DISCONTIGMEM

extern struct pglist_data *node_data[];

/*
 * Following are macros that are specific to this numa platform.
 */
#define reserve_bootmem(addr, size) \
        reserve_bootmem_node(NODE_DATA(0), (addr), (size))
#define alloc_bootmem(x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(ignore, x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(ignore, x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages_node(ignore, x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
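/*
 * Note: every bootmem wrapper above resolves to NODE_DATA(0), so all
 * boot-time allocations are served from node 0's bootmem data; the
 * per-node "ignore" argument of the *_node variants is discarded here.
 */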

#define node_localnr(pfn, nid)          ((pfn) - node_data[nid]->node_start_pfn)

/*
 * Following are macros that each numa implementation must define.
 */

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)    pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)

/*
 * Return a pointer to the node data for node n.
 */
#define NODE_DATA(nid)          (node_data[nid])

#define node_mem_map(nid)       (NODE_DATA(nid)->node_mem_map)
#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)                                               \
({                                                                      \
        pg_data_t *__pgdat = NODE_DATA(nid);                            \
        __pgdat->node_start_pfn + __pgdat->node_spanned_pages;          \
})

#define local_mapnr(kvaddr)                                             \
({                                                                      \
        unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT;               \
        (__pfn - node_start_pfn(pfn_to_nid(__pfn)));                    \
})

#define kern_addr_valid(kaddr)                                          \
({                                                                      \
        unsigned long __kaddr = (unsigned long)(kaddr);                 \
        pg_data_t *__pgdat = NODE_DATA(kvaddr_to_nid(__kaddr));         \
        test_bit(local_mapnr(__kaddr), __pgdat->valid_addr_bitmap);     \
})
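/*
 * kern_addr_valid() thus reduces to a bit test: the address is turned into
 * a node-local page index via local_mapnr() and checked against the owning
 * node's valid_addr_bitmap, which records which pages in the node's span
 * are actually backed by memory.
 */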

#define pfn_to_page(pfn)                                                \
({                                                                      \
        unsigned long __pfn = pfn;                                      \
        int __node  = pfn_to_nid(__pfn);                                \
        &node_mem_map(__node)[node_localnr(__pfn,__node)];              \
})

#define page_to_pfn(pg)                                                 \
({                                                                      \
        struct page *__page = pg;                                       \
        struct zone *__zone = page_zone(__page);                        \
        (unsigned long)(__page - __zone->zone_mem_map)                  \
                + __zone->zone_start_pfn;                               \
})
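/*
 * The translation works in both directions: pfn_to_page() finds the owning
 * node with pfn_to_nid() and indexes that node's mem_map with the
 * node-local offset from node_localnr(); page_to_pfn() goes back through
 * the page's zone, taking the offset of the struct page within
 * zone_mem_map and adding the zone's starting pfn.
 */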
#define pmd_page(pmd)           (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
/*
 * pfn_valid should be made as fast as possible, and the current definition
 * is valid for machines that are NUMA, but still contiguous, which is what
 * is currently supported. A more generalised, but slower definition would
 * be something like this - mbligh:
 * ( pfn_to_pgdat(pfn) && ((pfn) < node_end_pfn(pfn_to_nid(pfn))) )
 */
#define pfn_valid(pfn)          ((pfn) < num_physpages)

/*
 * generic node memory support, the following assumptions apply:
 *
 * 1) memory comes in 256MB contiguous chunks which are either present or not
 * 2) we will not have more than 64GB in total
 *
 * for now assume that 64GB is max amount of RAM for whole system
 *    64GB / 4096 bytes/page = 16777216 pages
 */
#define MAX_NR_PAGES 16777216
#define MAX_ELEMENTS 256
#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
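/*
 * Worked out: MAX_NR_PAGES / MAX_ELEMENTS = 16777216 / 256 = 65536 pages
 * per element, i.e. 65536 * 4 KB = 256 MB per element, matching assumption
 * (1) above. physnode_map[] therefore stores one node id (u8) for each
 * 256 MB chunk of the physical address space.
 */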

extern u8 physnode_map[];

static inline int pfn_to_nid(unsigned long pfn)
{
        return(physnode_map[(pfn) / PAGES_PER_ELEMENT]);
}
static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn)
{
        return(NODE_DATA(pfn_to_nid(pfn)));
}
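/*
 * Example (assuming 4 KB pages): pfn 0x8000 corresponds to physical
 * address 128 MB, so pfn_to_nid() reads physnode_map[0x8000 / 65536]
 * = physnode_map[0], the node id recorded for the first 256 MB chunk.
 */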

#ifdef CONFIG_X86_NUMAQ
#include <asm/numaq.h>
#elif CONFIG_ACPI_SRAT
#include <asm/srat.h>
#elif CONFIG_X86_PC
#define get_zholes_size(n) (0)
#else
#define pfn_to_nid(pfn)         (0)
#endif /* CONFIG_X86_NUMAQ */

extern int get_memcfg_numa_flat(void);
/*
 * This allows the kernel to be built for any one NUMA
 * architecture and still fall back to the flat function
 * if that architecture's probe fails.
 */
static inline void get_memcfg_numa(void)
{
#ifdef CONFIG_X86_NUMAQ
        if (get_memcfg_numaq())
                return;
#elif CONFIG_ACPI_SRAT
        if (get_memcfg_from_srat())
                return;
#endif

        get_memcfg_numa_flat();
}
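/*
 * Probe order: the architecture-specific configuration is tried first
 * (NUMA-Q or ACPI SRAT, depending on the build), and get_memcfg_numa_flat()
 * runs as the unconditional last resort, falling back to treating memory
 * as a single flat node.
 */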

#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_MMZONE_H_ */