Print this page
5042 stop using deprecated atomic functions
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/vm/hat_pte.h
+++ new/usr/src/uts/i86pc/vm/hat_pte.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #ifndef _VM_HAT_PTE_H
27 27 #define _VM_HAT_PTE_H
28 28
29 -#pragma ident "%Z%%M% %I% %E% SMI"
30 -
31 29 #ifdef __cplusplus
32 30 extern "C" {
33 31 #endif
34 32
35 33 #include <sys/types.h>
36 34 #include <sys/mach_mmu.h>
37 35
38 36 /*
39 37 * macros to get/set/clear the PTE fields
40 38 */
41 39 #define PTE_SET(p, f) ((p) |= (f))
42 40 #define PTE_CLR(p, f) ((p) &= ~(x86pte_t)(f))
43 41 #define PTE_GET(p, f) ((p) & (f))
44 42
45 43 /*
46 44 * Handy macro to check if a pagetable entry or pointer is valid
47 45 */
48 46 #define PTE_ISVALID(p) PTE_GET(p, PT_VALID)
49 47
50 48 /*
51 49 * Does a PTE map a large page.
52 50 */
53 51 #define PTE_IS_LGPG(p, l) ((l) > 0 && PTE_GET((p), PT_PAGESIZE))
54 52
55 53 /*
56 54 * does this PTE represent a page (not a pointer to another page table)?
57 55 */
58 56 #define PTE_ISPAGE(p, l) \
59 57 (PTE_ISVALID(p) && ((l) == 0 || PTE_GET(p, PT_PAGESIZE)))
60 58
61 59 /*
62 60 * Handy macro to check if 2 PTE's are the same - ignores REF/MOD bits.
63 61 * On the 64 bit hypervisor we also have to ignore the high order
64 62 * software bits and the global/user bit which are set/cleared
65 63 * capriciously (by the hypervisor!)
66 64 */
67 65 #if defined(__amd64) && defined(__xpv)
68 66 #define PT_IGNORE ((0x7fful << 52) | PT_GLOBAL | PT_USER)
69 67 #else
70 68 #define PT_IGNORE (0)
71 69 #endif
72 70 #define PTE_EQUIV(a, b) (((a) | (PT_IGNORE | PT_REF | PT_MOD)) == \
73 71 ((b) | (PT_IGNORE | PT_REF | PT_MOD)))
74 72
75 73 /*
76 74 * Shorthand for converting a PTE to its pfn.
77 75 */
78 76 #define PTE2MFN(p, l) \
79 77 mmu_btop(PTE_GET((p), PTE_IS_LGPG((p), (l)) ? PT_PADDR_LGPG : PT_PADDR))
80 78 #ifdef __xpv
81 79 #define PTE2PFN(p, l) pte2pfn(p, l)
82 80 #else
83 81 #define PTE2PFN(p, l) PTE2MFN(p, l)
84 82 #endif
85 83
86 84 #define PT_NX (0x8000000000000000ull)
87 85 #define PT_PADDR (0x000ffffffffff000ull)
88 86 #define PT_PADDR_LGPG (0x000fffffffffe000ull) /* phys addr for large pages */
89 87
90 88 /*
91 89 * Macros to create a PTP or PTE from the pfn and level
92 90 */
93 91 #ifdef __xpv
94 92
95 93 /*
96 94 * we use the highest order bit in physical address pfns to mark foreign mfns
97 95 */
98 96 #ifdef _LP64
99 97 #define PFN_IS_FOREIGN_MFN (1ul << 51)
100 98 #else
101 99 #define PFN_IS_FOREIGN_MFN (1ul << 31)
102 100 #endif
103 101
104 102 #define MAKEPTP(pfn, l) \
105 103 (pa_to_ma(pfn_to_pa(pfn)) | mmu.ptp_bits[(l) + 1])
106 104 #define MAKEPTE(pfn, l) \
107 105 ((pfn & PFN_IS_FOREIGN_MFN) ? \
108 106 ((pfn_to_pa(pfn & ~PFN_IS_FOREIGN_MFN) | mmu.pte_bits[l]) | \
109 107 PT_FOREIGN | PT_REF | PT_MOD) : \
110 108 (pa_to_ma(pfn_to_pa(pfn)) | mmu.pte_bits[l]))
111 109 #else
112 110 #define MAKEPTP(pfn, l) \
113 111 (pfn_to_pa(pfn) | mmu.ptp_bits[(l) + 1])
114 112 #define MAKEPTE(pfn, l) \
115 113 (pfn_to_pa(pfn) | mmu.pte_bits[l])
116 114 #endif
117 115
118 116 /*
119 117 * The idea of "level" refers to the level where the page table is used in the
120 118 * hardware address translation steps. The level values correspond to the
121 119 * following names of tables used in AMD/Intel architecture documents:
122 120 *
123 121 * AMD/INTEL name Level #
124 122 * ---------------------- -------
125 123 * Page Map Level 4 3
126 124 * Page Directory Pointer 2
127 125 * Page Directory 1
128 126 * Page Table 0
129 127 *
130 128 * The numbering scheme is such that the values of 0 and 1 can correspond to
131 129 * the pagesize codes used for MPSS support. For now the maximum level at
132 130 * which you can have a large page is a constant that may change in
133 131 * future processors.
134 132 *
135 133 * The type of "level_t" is signed so that it can be used like:
136 134 * level_t l;
137 135 * ...
138 136 * while (--l >= 0)
139 137 * ...
140 138 */
141 139 #define MAX_NUM_LEVEL 4
142 140 #define MAX_PAGE_LEVEL 2
143 141 typedef int8_t level_t;
144 142 #define LEVEL_SHIFT(l) (mmu.level_shift[l])
145 143 #define LEVEL_SIZE(l) (mmu.level_size[l])
146 144 #define LEVEL_OFFSET(l) (mmu.level_offset[l])
147 145 #define LEVEL_MASK(l) (mmu.level_mask[l])
148 146
149 147 /*
150 148 * Macros to:
151 149 * Check for a PFN above 4Gig and 64Gig for 32 bit PAE support
152 150 */
153 151 #define PFN_4G (4ull * (1024 * 1024 * 1024 / MMU_PAGESIZE))
154 152 #define PFN_64G (64ull * (1024 * 1024 * 1024 / MMU_PAGESIZE))
155 153 #define PFN_ABOVE4G(pfn) ((pfn) >= PFN_4G)
156 154 #define PFN_ABOVE64G(pfn) ((pfn) >= PFN_64G)
157 155
158 156 /*
159 157 * The CR3 register holds the physical address of the top level page table.
160 158 */
161 159 #define MAKECR3(pfn) mmu_ptob(pfn)
162 160
163 161 /*
164 162 * HAT/MMU parameters that depend on kernel mode and/or processor type
165 163 */
166 164 struct htable;
167 165 struct hat_mmu_info {
168 166 x86pte_t pt_nx; /* either 0 or PT_NX */
169 167 x86pte_t pt_global; /* either 0 or PT_GLOBAL */
170 168
171 169 pfn_t highest_pfn;
172 170
173 171 uint_t num_level; /* number of page table levels in use */
174 172 uint_t max_level; /* just num_level - 1 */
175 173 uint_t max_page_level; /* maximum level at which we can map a page */
176 174 uint_t umax_page_level; /* max user page map level */
177 175 uint_t ptes_per_table; /* # of entries in lower level page tables */
178 176 uint_t top_level_count; /* # of entries in top most level page table */
179 177
180 178 uint_t hash_cnt; /* cnt of entries in htable_hash_cache */
181 179 uint_t vlp_hash_cnt; /* cnt of entries in vlp htable_hash_cache */
182 180
183 181 uint_t pae_hat; /* either 0 or 1 */
184 182
185 183 uintptr_t hole_start; /* start of VA hole (or -1 if none) */
186 184 uintptr_t hole_end; /* end of VA hole (or 0 if none) */
187 185
188 186 struct htable **kmap_htables; /* htables for segmap + 32 bit heap */
189 187 x86pte_t *kmap_ptes; /* mapping of pagetables that map kmap */
190 188 uintptr_t kmap_addr; /* start addr of kmap */
191 189 uintptr_t kmap_eaddr; /* end addr of kmap */
192 190
193 191 uint_t pte_size; /* either 4 or 8 */
194 192 uint_t pte_size_shift; /* either 2 or 3 */
195 193 x86pte_t ptp_bits[MAX_NUM_LEVEL]; /* bits set for interior PTP */
196 194 x86pte_t pte_bits[MAX_NUM_LEVEL]; /* bits set for leaf PTE */
197 195
198 196 /*
199 197 * A range of VA used to window pages in the i86pc/vm code.
200 198 * See PWIN_XXX macros.
201 199 */
202 200 caddr_t pwin_base;
203 201 caddr_t pwin_pte_va;
204 202 paddr_t pwin_pte_pa;
205 203
206 204 /*
207 205 * The following tables are equivalent to PAGEXXXXX at different levels
208 206 * in the page table hierarchy.
209 207 */
210 208 uint_t level_shift[MAX_NUM_LEVEL]; /* PAGESHIFT for given level */
211 209 uintptr_t level_size[MAX_NUM_LEVEL]; /* PAGESIZE for given level */
212 210 uintptr_t level_offset[MAX_NUM_LEVEL]; /* PAGEOFFSET for given level */
213 211 uintptr_t level_mask[MAX_NUM_LEVEL]; /* PAGEMASK for given level */
214 212 };
215 213
216 214
217 215 #if defined(_KERNEL)
218 216
219 217 /*
220 218 * Macros to access the HAT's private page windows. They're used for
221 219 * accessing pagetables, ppcopy() and page_zero().
222 220 * The 1st two macros are used to get an index for the particular use.
223 221 * The next three give you:
224 222 * - the virtual address of the window
225 223 * - the virtual address of the pte that maps the window
226 224 * - the physical address of the pte that maps the window
227 225 */
↓ open down ↓ |
187 lines elided |
↑ open up ↑ |
228 226 #define PWIN_TABLE(cpuid) ((cpuid) * 2)
229 227 #define PWIN_SRC(cpuid) ((cpuid) * 2 + 1) /* for x86pte_copy() */
230 228 #define PWIN_VA(x) (mmu.pwin_base + ((x) << MMU_PAGESHIFT))
231 229 #define PWIN_PTE_VA(x) (mmu.pwin_pte_va + ((x) << mmu.pte_size_shift))
232 230 #define PWIN_PTE_PA(x) (mmu.pwin_pte_pa + ((x) << mmu.pte_size_shift))
233 231
234 232 /*
235 233 * The concept of a VA hole exists in AMD64. This might need to be made
236 234 * model specific eventually.
237 235 *
238 - * In the 64 bit kernel PTE loads are atomic, but need cas64 on 32 bit kernel.
236 + * In the 64 bit kernel PTE loads are atomic, but need atomic_cas_64 on 32
237 + * bit kernel.
239 238 */
240 239 #if defined(__amd64)
241 240
242 241 #ifdef lint
243 242 #define IN_VA_HOLE(va) (__lintzero)
244 243 #else
245 244 #define IN_VA_HOLE(va) (mmu.hole_start <= (va) && (va) < mmu.hole_end)
246 245 #endif
247 246
248 247 #define FMT_PTE "0x%lx"
249 248 #define GET_PTE(ptr) (*(x86pte_t *)(ptr))
250 249 #define SET_PTE(ptr, pte) (*(x86pte_t *)(ptr) = pte)
251 -#define CAS_PTE(ptr, x, y) cas64(ptr, x, y)
250 +#define CAS_PTE(ptr, x, y) atomic_cas_64(ptr, x, y)
252 251
253 252 #elif defined(__i386)
254 253
255 254 #define IN_VA_HOLE(va) (__lintzero)
256 255
257 256 #define FMT_PTE "0x%llx"
258 257
259 258 /* on 32 bit kernels, 64 bit loads aren't atomic, use get_pte64() */
260 259 extern x86pte_t get_pte64(x86pte_t *ptr);
261 260 #define GET_PTE(ptr) (mmu.pae_hat ? get_pte64(ptr) : *(x86pte32_t *)(ptr))
262 261 #define SET_PTE(ptr, pte) \
263 262 ((mmu.pae_hat ? ((x86pte32_t *)(ptr))[1] = (pte >> 32) : 0), \
264 263 *(x86pte32_t *)(ptr) = pte)
265 264 #define CAS_PTE(ptr, x, y) \
266 - (mmu.pae_hat ? cas64(ptr, x, y) : \
267 - cas32((uint32_t *)(ptr), (uint32_t)(x), (uint32_t)(y)))
265 + (mmu.pae_hat ? atomic_cas_64(ptr, x, y) : \
266 + atomic_cas_32((uint32_t *)(ptr), (uint32_t)(x), (uint32_t)(y)))
268 267
269 268 #endif /* __i386 */
270 269
271 270 /*
272 271 * Return a pointer to the pte entry at the given index within a page table.
273 272 */
274 273 #define PT_INDEX_PTR(p, x) \
275 274 ((x86pte_t *)((uintptr_t)(p) + ((x) << mmu.pte_size_shift)))
276 275
277 276 /*
278 277 * Return the physical address of the pte entry at the given index within a
279 278 * page table.
280 279 */
281 280 #define PT_INDEX_PHYSADDR(p, x) \
282 281 ((paddr_t)(p) + ((x) << mmu.pte_size_shift))
283 282
284 283 /*
285 284 * From pfn to bytes, careful not to lose bits on PAE.
286 285 */
287 286 #define pfn_to_pa(pfn) (mmu_ptob((paddr_t)(pfn)))
288 287
289 288 #ifdef __xpv
290 289 extern pfn_t pte2pfn(x86pte_t, level_t);
291 290 #endif
292 291
293 292 extern struct hat_mmu_info mmu;
294 293
295 294 #endif /* _KERNEL */
296 295
297 296
298 297 #ifdef __cplusplus
299 298 }
300 299 #endif
301 300
302 301 #endif /* _VM_HAT_PTE_H */
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX