5042 stop using deprecated atomic functions
--- old/usr/src/uts/common/vm/seg_vn.c
+++ new/usr/src/uts/common/vm/seg_vn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * University Copyright- Copyright (c) 1982, 1986, 1988
30 30 * The Regents of the University of California
31 31 * All Rights Reserved
32 32 *
33 33 * University Acknowledgment- Portions of this document are derived from
34 34 * software developed by the University of California, Berkeley, and its
35 35 * contributors.
36 36 */
37 37
38 38 /*
39 39 * VM - shared or copy-on-write from a vnode/anonymous memory.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/param.h>
44 44 #include <sys/t_lock.h>
45 45 #include <sys/errno.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/mman.h>
48 48 #include <sys/debug.h>
49 49 #include <sys/cred.h>
50 50 #include <sys/vmsystm.h>
51 51 #include <sys/tuneable.h>
52 52 #include <sys/bitmap.h>
53 53 #include <sys/swap.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/sysmacros.h>
56 56 #include <sys/vtrace.h>
57 57 #include <sys/cmn_err.h>
58 58 #include <sys/callb.h>
59 59 #include <sys/vm.h>
60 60 #include <sys/dumphdr.h>
61 61 #include <sys/lgrp.h>
62 62
63 63 #include <vm/hat.h>
64 64 #include <vm/as.h>
65 65 #include <vm/seg.h>
66 66 #include <vm/seg_vn.h>
67 67 #include <vm/pvn.h>
68 68 #include <vm/anon.h>
69 69 #include <vm/page.h>
70 70 #include <vm/vpage.h>
71 71 #include <sys/proc.h>
72 72 #include <sys/task.h>
73 73 #include <sys/project.h>
74 74 #include <sys/zone.h>
75 75 #include <sys/shm_impl.h>
76 76 /*
77 77 * Private seg op routines.
78 78 */
79 79 static int segvn_dup(struct seg *seg, struct seg *newseg);
80 80 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
81 81 static void segvn_free(struct seg *seg);
82 82 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
83 83 caddr_t addr, size_t len, enum fault_type type,
84 84 enum seg_rw rw);
85 85 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
86 86 static int segvn_setprot(struct seg *seg, caddr_t addr,
87 87 size_t len, uint_t prot);
88 88 static int segvn_checkprot(struct seg *seg, caddr_t addr,
89 89 size_t len, uint_t prot);
90 90 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
91 91 static size_t segvn_swapout(struct seg *seg);
92 92 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
93 93 int attr, uint_t flags);
94 94 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
95 95 char *vec);
96 96 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
97 97 int attr, int op, ulong_t *lockmap, size_t pos);
98 98 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
99 99 uint_t *protv);
100 100 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
101 101 static int segvn_gettype(struct seg *seg, caddr_t addr);
102 102 static int segvn_getvp(struct seg *seg, caddr_t addr,
103 103 struct vnode **vpp);
104 104 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
105 105 uint_t behav);
106 106 static void segvn_dump(struct seg *seg);
107 107 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
108 108 struct page ***ppp, enum lock_type type, enum seg_rw rw);
109 109 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
110 110 uint_t szc);
111 111 static int segvn_getmemid(struct seg *seg, caddr_t addr,
112 112 memid_t *memidp);
113 113 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t);
114 114 static int segvn_capable(struct seg *seg, segcapability_t capable);
115 115
116 116 struct seg_ops segvn_ops = {
117 117 segvn_dup,
118 118 segvn_unmap,
119 119 segvn_free,
120 120 segvn_fault,
121 121 segvn_faulta,
122 122 segvn_setprot,
123 123 segvn_checkprot,
124 124 segvn_kluster,
125 125 segvn_swapout,
126 126 segvn_sync,
127 127 segvn_incore,
128 128 segvn_lockop,
129 129 segvn_getprot,
130 130 segvn_getoffset,
131 131 segvn_gettype,
132 132 segvn_getvp,
133 133 segvn_advise,
134 134 segvn_dump,
135 135 segvn_pagelock,
136 136 segvn_setpagesize,
137 137 segvn_getmemid,
138 138 segvn_getpolicy,
139 139 segvn_capable,
140 140 };
141 141
142 142 /*
143 143 * Common zfod structures, provided as a shorthand for others to use.
144 144 */
145 145 static segvn_crargs_t zfod_segvn_crargs =
146 146 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
147 147 static segvn_crargs_t kzfod_segvn_crargs =
148 148 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
149 149 PROT_ALL & ~PROT_USER);
150 150 static segvn_crargs_t stack_noexec_crargs =
151 151 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);
152 152
153 153 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */
154 154 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */
155 155 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */
156 156 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
157 157
158 158 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
159 159
160 160 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */
161 161
162 162 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */
163 163 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */
164 164 uint_t segvn_pglock_comb_bshift;
165 165 size_t segvn_pglock_comb_palign;
166 166
167 167 static int segvn_concat(struct seg *, struct seg *, int);
168 168 static int segvn_extend_prev(struct seg *, struct seg *,
169 169 struct segvn_crargs *, size_t);
170 170 static int segvn_extend_next(struct seg *, struct seg *,
171 171 struct segvn_crargs *, size_t);
172 172 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
173 173 static void segvn_pagelist_rele(page_t **);
174 174 static void segvn_setvnode_mpss(vnode_t *);
175 175 static void segvn_relocate_pages(page_t **, page_t *);
176 176 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
177 177 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
178 178 uint_t, page_t **, page_t **, uint_t *, int *);
179 179 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
180 180 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
181 181 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
182 182 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
183 183 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
184 184 u_offset_t, struct vpage *, page_t **, uint_t,
185 185 enum fault_type, enum seg_rw, int);
186 186 static void segvn_vpage(struct seg *);
187 187 static size_t segvn_count_swap_by_vpages(struct seg *);
188 188
189 189 static void segvn_purge(struct seg *seg);
190 190 static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
191 191 enum seg_rw, int);
192 192 static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
193 193 enum seg_rw, int);
194 194
195 195 static int sameprot(struct seg *, caddr_t, size_t);
196 196
197 197 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
198 198 static int segvn_clrszc(struct seg *);
199 199 static struct seg *segvn_split_seg(struct seg *, caddr_t);
200 200 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
201 201 ulong_t, uint_t);
202 202
203 203 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
204 204 size_t, void *, u_offset_t);
205 205
206 206 static struct kmem_cache *segvn_cache;
207 207 static struct kmem_cache **segvn_szc_cache;
208 208
209 209 #ifdef VM_STATS
210 210 static struct segvnvmstats_str {
211 211 ulong_t fill_vp_pages[31];
212 212 ulong_t fltvnpages[49];
213 213 ulong_t fullszcpages[10];
214 214 ulong_t relocatepages[3];
215 215 ulong_t fltanpages[17];
216 216 ulong_t pagelock[2];
217 217 ulong_t demoterange[3];
218 218 } segvnvmstats;
219 219 #endif /* VM_STATS */
220 220
221 221 #define SDR_RANGE 1 /* demote entire range */
222 222 #define SDR_END 2 /* demote non aligned ends only */
223 223
224 224 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
225 225 if ((len) != 0) { \
226 226 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
227 227 ASSERT(lpgaddr >= (seg)->s_base); \
228 228 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
229 229 (len)), pgsz); \
230 230 ASSERT(lpgeaddr > lpgaddr); \
231 231 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \
232 232 } else { \
233 233 lpgeaddr = lpgaddr = (addr); \
234 234 } \
235 235 }
236 236
237 237 /*ARGSUSED*/
238 238 static int
239 239 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
240 240 {
241 241 struct segvn_data *svd = buf;
242 242
243 243 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
244 244 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
245 245 svd->svn_trnext = svd->svn_trprev = NULL;
246 246 return (0);
247 247 }
248 248
249 249 /*ARGSUSED1*/
250 250 static void
251 251 segvn_cache_destructor(void *buf, void *cdrarg)
252 252 {
253 253 struct segvn_data *svd = buf;
254 254
255 255 rw_destroy(&svd->lock);
256 256 mutex_destroy(&svd->segfree_syncmtx);
257 257 }
258 258
259 259 /*ARGSUSED*/
260 260 static int
261 261 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
262 262 {
263 263 bzero(buf, sizeof (svntr_t));
264 264 return (0);
265 265 }
266 266
267 267 /*
268 268 * Patching this variable to non-zero allows the system to run with
269 269 * stacks marked as "not executable". It's a bit of a kludge, but is
270 270 * provided as a tweakable for platforms that export those ABIs
271 271 * (e.g. sparc V8) that have executable stacks enabled by default.
272 272 * There are also some restrictions for platforms that don't actually
273 273 * implement 'noexec' protections.
274 274 *
275 275 * Once enabled, the system is (therefore) unable to provide a fully
276 276 * ABI-compliant execution environment, though practically speaking,
277 277 * most everything works. The exceptions are generally some interpreters
278 278 * and debuggers that create executable code on the stack and jump
279 279 * into it (without explicitly mprotecting the address range to include
280 280 * PROT_EXEC).
281 281 *
282 282 * One important class of applications that are disabled are those
283 283 * that have been transformed into malicious agents using one of the
284 284 * numerous "buffer overflow" attacks. See 4007890.
285 285 */
286 286 int noexec_user_stack = 0;
287 287 int noexec_user_stack_log = 1;
288 288
289 289 int segvn_lpg_disable = 0;
290 290 uint_t segvn_maxpgszc = 0;
291 291
292 292 ulong_t segvn_vmpss_clrszc_cnt;
293 293 ulong_t segvn_vmpss_clrszc_err;
294 294 ulong_t segvn_fltvnpages_clrszc_cnt;
295 295 ulong_t segvn_fltvnpages_clrszc_err;
296 296 ulong_t segvn_setpgsz_align_err;
297 297 ulong_t segvn_setpgsz_anon_align_err;
298 298 ulong_t segvn_setpgsz_getattr_err;
299 299 ulong_t segvn_setpgsz_eof_err;
300 300 ulong_t segvn_faultvnmpss_align_err1;
301 301 ulong_t segvn_faultvnmpss_align_err2;
302 302 ulong_t segvn_faultvnmpss_align_err3;
303 303 ulong_t segvn_faultvnmpss_align_err4;
304 304 ulong_t segvn_faultvnmpss_align_err5;
305 305 ulong_t segvn_vmpss_pageio_deadlk_err;
306 306
307 307 int segvn_use_regions = 1;
308 308
309 309 /*
310 310 * Segvn supports text replication optimization for NUMA platforms. Text
311 311 * replicas are represented by anon maps (amp). There's one amp per text file
312 312 * region per lgroup. A process chooses the amp for each of its text mappings
313 313 * based on the lgroup assignment of its main thread (t_tid = 1). All
314 314 * processes that want a replica on a particular lgroup for the same text file
315 315 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table
316 316 * with vp,off,size,szc used as a key. Text replication segments are read only
317 317 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
318 318 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
319 319 * pages. Replication amp is assigned to a segment when it gets its first
320 320 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread
321 321 * rechecks periodically if the process still maps an amp local to the main
322 322 * thread. If not, the async thread forces the process to remap to an amp in the new
323 323 * home lgroup of the main thread. Current text replication implementation
324 324 * only provides the benefit to workloads that do most of their work in the
325 325 * main thread of a process or all the threads of a process run in the same
326 326 * lgroup. To extend text replication benefit to different types of
327 327 * multithreaded workloads further work would be needed in the hat layer to
328 328 * allow the same virtual address in the same hat to simultaneously map
329 329 * different physical addresses (i.e. page table replication would be needed
330 330 * for x86).
331 331 *
332 332 * amp pages are used instead of vnode pages as long as segment has a very
333 333 * simple life cycle. It's created via segvn_create(), handles S_EXEC
334 334 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
335 335 * happens such as protection is changed, real COW fault happens, pagesize is
336 336 * changed, MC_LOCK is requested or segment is partially unmapped we turn off
337 337 * text replication by converting the segment back to vnode only segment
338 338 * (unmap segment's address range and set svd->amp to NULL).
339 339 *
340 340 * The original file can be changed after amp is inserted into
341 341 * svntr_hashtab. Processes that are launched after the file is already
342 342 * changed can't use the replica's created prior to the file change. To
343 343 * implement this functionality hash entries are timestamped. Replica's can
344 344 * only be used if current file modification time is the same as the timestamp
345 345 * saved when hash entry was created. However just timestamps alone are not
346 346 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
347 347 * deal with file changes via MAP_SHARED mappings differently. When writable
348 348 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
349 349 * existing replica's for this vnode as not usable for future text
350 350 * mappings. And we don't create new replica's for files that currently have
351 351 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
352 352 * true).
353 353 */
354 354
355 355 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20)
356 356 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;
357 357
358 358 static ulong_t svntr_hashtab_sz = 512;
359 359 static svntr_bucket_t *svntr_hashtab = NULL;
360 360 static struct kmem_cache *svntr_cache;
361 361 static svntr_stats_t *segvn_textrepl_stats;
362 362 static ksema_t segvn_trasync_sem;
363 363
364 364 int segvn_disable_textrepl = 1;
365 365 size_t textrepl_size_thresh = (size_t)-1;
366 366 size_t segvn_textrepl_bytes = 0;
367 367 size_t segvn_textrepl_max_bytes = 0;
368 368 clock_t segvn_update_textrepl_interval = 0;
369 369 int segvn_update_tr_time = 10;
370 370 int segvn_disable_textrepl_update = 0;
371 371
372 372 static void segvn_textrepl(struct seg *);
373 373 static void segvn_textunrepl(struct seg *, int);
374 374 static void segvn_inval_trcache(vnode_t *);
375 375 static void segvn_trasync_thread(void);
376 376 static void segvn_trupdate_wakeup(void *);
377 377 static void segvn_trupdate(void);
378 378 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
379 379 ulong_t);
380 380
381 381 /*
382 382 * Initialize segvn data structures
383 383 */
384 384 void
385 385 segvn_init(void)
386 386 {
387 387 uint_t maxszc;
388 388 uint_t szc;
389 389 size_t pgsz;
390 390
391 391 segvn_cache = kmem_cache_create("segvn_cache",
392 392 sizeof (struct segvn_data), 0,
393 393 segvn_cache_constructor, segvn_cache_destructor, NULL,
394 394 NULL, NULL, 0);
395 395
396 396 if (segvn_lpg_disable == 0) {
397 397 szc = maxszc = page_num_pagesizes() - 1;
398 398 if (szc == 0) {
399 399 segvn_lpg_disable = 1;
400 400 }
401 401 if (page_get_pagesize(0) != PAGESIZE) {
402 402 panic("segvn_init: bad szc 0");
403 403 /*NOTREACHED*/
404 404 }
405 405 while (szc != 0) {
406 406 pgsz = page_get_pagesize(szc);
407 407 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
408 408 panic("segvn_init: bad szc %d", szc);
409 409 /*NOTREACHED*/
410 410 }
411 411 szc--;
412 412 }
413 413 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
414 414 segvn_maxpgszc = maxszc;
415 415 }
416 416
417 417 if (segvn_maxpgszc) {
418 418 segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
419 419 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
420 420 KM_SLEEP);
421 421 }
422 422
423 423 for (szc = 1; szc <= segvn_maxpgszc; szc++) {
424 424 char str[32];
425 425
426 426 (void) sprintf(str, "segvn_szc_cache%d", szc);
427 427 segvn_szc_cache[szc] = kmem_cache_create(str,
428 428 page_get_pagecnt(szc) * sizeof (page_t *), 0,
429 429 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
430 430 }
431 431
432 432
433 433 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
434 434 segvn_use_regions = 0;
435 435
436 436 /*
437 437 * For now shared regions and text replication segvn support
438 438 * are mutually exclusive. This is acceptable because
439 439 * currently significant benefit from text replication was
440 440 * only observed on AMD64 NUMA platforms (due to relatively
441 441 * small L2$ size) and currently we don't support shared
442 442 * regions on x86.
443 443 */
444 444 if (segvn_use_regions && !segvn_disable_textrepl) {
445 445 segvn_disable_textrepl = 1;
446 446 }
447 447
448 448 #if defined(_LP64)
449 449 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
450 450 !segvn_disable_textrepl) {
451 451 ulong_t i;
452 452 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);
453 453
454 454 svntr_cache = kmem_cache_create("svntr_cache",
455 455 sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
456 456 NULL, NULL, NULL, 0);
457 457 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
458 458 for (i = 0; i < svntr_hashtab_sz; i++) {
459 459 mutex_init(&svntr_hashtab[i].tr_lock, NULL,
460 460 MUTEX_DEFAULT, NULL);
461 461 }
462 462 segvn_textrepl_max_bytes = ptob(physmem) /
463 463 segvn_textrepl_max_bytes_factor;
464 464 segvn_textrepl_stats = kmem_zalloc(NCPU *
465 465 sizeof (svntr_stats_t), KM_SLEEP);
466 466 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
467 467 (void) thread_create(NULL, 0, segvn_trasync_thread,
468 468 NULL, 0, &p0, TS_RUN, minclsyspri);
469 469 }
470 470 #endif
471 471
472 472 if (!ISP2(segvn_pglock_comb_balign) ||
473 473 segvn_pglock_comb_balign < PAGESIZE) {
474 474 segvn_pglock_comb_balign = 1UL << 16; /* 64K */
475 475 }
476 476 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
477 477 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
478 478 }
479 479
480 480 #define SEGVN_PAGEIO ((void *)0x1)
481 481 #define SEGVN_NOPAGEIO ((void *)0x2)
482 482
483 483 static void
484 484 segvn_setvnode_mpss(vnode_t *vp)
485 485 {
486 486 int err;
487 487
488 488 ASSERT(vp->v_mpssdata == NULL ||
489 489 vp->v_mpssdata == SEGVN_PAGEIO ||
490 490 vp->v_mpssdata == SEGVN_NOPAGEIO);
491 491
492 492 if (vp->v_mpssdata == NULL) {
493 493 if (vn_vmpss_usepageio(vp)) {
494 494 err = VOP_PAGEIO(vp, (page_t *)NULL,
495 495 (u_offset_t)0, 0, 0, CRED(), NULL);
496 496 } else {
497 497 err = ENOSYS;
498 498 }
499 499 /*
500 500 * set v_mpssdata just once per vnode life
501 501 * so that it never changes.
502 502 */
503 503 mutex_enter(&vp->v_lock);
504 504 if (vp->v_mpssdata == NULL) {
505 505 if (err == EINVAL) {
506 506 vp->v_mpssdata = SEGVN_PAGEIO;
507 507 } else {
508 508 vp->v_mpssdata = SEGVN_NOPAGEIO;
509 509 }
510 510 }
511 511 mutex_exit(&vp->v_lock);
512 512 }
513 513 }
514 514
515 515 int
516 516 segvn_create(struct seg *seg, void *argsp)
517 517 {
518 518 struct segvn_crargs *a = (struct segvn_crargs *)argsp;
519 519 struct segvn_data *svd;
520 520 size_t swresv = 0;
521 521 struct cred *cred;
522 522 struct anon_map *amp;
523 523 int error = 0;
524 524 size_t pgsz;
525 525 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
526 526 int use_rgn = 0;
527 527 int trok = 0;
528 528
529 529 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
530 530
531 531 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
532 532 panic("segvn_create type");
533 533 /*NOTREACHED*/
534 534 }
535 535
536 536 /*
537 537 * Check arguments. If a shared anon structure is given then
538 538 * it is illegal to also specify a vp.
539 539 */
540 540 if (a->amp != NULL && a->vp != NULL) {
541 541 panic("segvn_create anon_map");
542 542 /*NOTREACHED*/
543 543 }
544 544
545 545 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
546 546 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
547 547 segvn_use_regions) {
548 548 use_rgn = 1;
549 549 }
550 550
551 551 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
552 552 if (a->type == MAP_SHARED)
553 553 a->flags &= ~MAP_NORESERVE;
554 554
555 555 if (a->szc != 0) {
556 556 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
557 557 (a->amp != NULL && a->type == MAP_PRIVATE) ||
558 558 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
559 559 a->szc = 0;
560 560 } else {
561 561 if (a->szc > segvn_maxpgszc)
562 562 a->szc = segvn_maxpgszc;
563 563 pgsz = page_get_pagesize(a->szc);
564 564 if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
565 565 !IS_P2ALIGNED(seg->s_size, pgsz)) {
566 566 a->szc = 0;
567 567 } else if (a->vp != NULL) {
568 568 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
569 569 /*
570 570 * paranoid check.
571 571 * hat_page_demote() is not supported
572 572 * on swapfs pages.
573 573 */
574 574 a->szc = 0;
575 575 } else if (map_addr_vacalign_check(seg->s_base,
576 576 a->offset & PAGEMASK)) {
577 577 a->szc = 0;
578 578 }
579 579 } else if (a->amp != NULL) {
580 580 pgcnt_t anum = btopr(a->offset);
581 581 pgcnt_t pgcnt = page_get_pagecnt(a->szc);
582 582 if (!IS_P2ALIGNED(anum, pgcnt)) {
583 583 a->szc = 0;
584 584 }
585 585 }
586 586 }
587 587 }
588 588
589 589 /*
590 590 * If segment may need private pages, reserve them now.
591 591 */
592 592 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
593 593 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
594 594 if (anon_resv_zone(seg->s_size,
595 595 seg->s_as->a_proc->p_zone) == 0)
596 596 return (EAGAIN);
597 597 swresv = seg->s_size;
598 598 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
599 599 seg, swresv, 1);
600 600 }
601 601
602 602 /*
603 603 * Reserve any mapping structures that may be required.
604 604 *
605 605 * Don't do it for segments that may use regions. It's currently a
606 606 * noop in the hat implementations anyway.
607 607 */
608 608 if (!use_rgn) {
609 609 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
610 610 }
611 611
612 612 if (a->cred) {
613 613 cred = a->cred;
614 614 crhold(cred);
615 615 } else {
616 616 crhold(cred = CRED());
617 617 }
618 618
619 619 /* Inform the vnode of the new mapping */
620 620 if (a->vp != NULL) {
621 621 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
622 622 seg->s_as, seg->s_base, seg->s_size, a->prot,
623 623 a->maxprot, a->type, cred, NULL);
624 624 if (error) {
625 625 if (swresv != 0) {
626 626 anon_unresv_zone(swresv,
627 627 seg->s_as->a_proc->p_zone);
628 628 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
629 629 "anon proc:%p %lu %u", seg, swresv, 0);
630 630 }
631 631 crfree(cred);
632 632 if (!use_rgn) {
633 633 hat_unload(seg->s_as->a_hat, seg->s_base,
634 634 seg->s_size, HAT_UNLOAD_UNMAP);
635 635 }
636 636 return (error);
637 637 }
638 638 /*
639 639 * svntr_hashtab will be NULL if we support shared regions.
640 640 */
641 641 trok = ((a->flags & MAP_TEXT) &&
642 642 (seg->s_size > textrepl_size_thresh ||
643 643 (a->flags & _MAP_TEXTREPL)) &&
644 644 lgrp_optimizations() && svntr_hashtab != NULL &&
645 645 a->type == MAP_PRIVATE && swresv == 0 &&
646 646 !(a->flags & MAP_NORESERVE) &&
647 647 seg->s_as != &kas && a->vp->v_type == VREG);
648 648
649 649 ASSERT(!trok || !use_rgn);
650 650 }
651 651
652 652 /*
653 653 * MAP_NORESERVE mappings don't count towards the VSZ of a process
654 654 * until we fault the pages in.
655 655 */
656 656 if ((a->vp == NULL || a->vp->v_type != VREG) &&
657 657 a->flags & MAP_NORESERVE) {
658 658 seg->s_as->a_resvsize -= seg->s_size;
659 659 }
660 660
661 661 /*
662 662 * If more than one segment in the address space, and they're adjacent
663 663 * virtually, try to concatenate them. Don't concatenate if an
664 664 * explicit anon_map structure was supplied (e.g., SystemV shared
665 665 * memory) or if we'll use text replication for this segment.
666 666 */
667 667 if (a->amp == NULL && !use_rgn && !trok) {
668 668 struct seg *pseg, *nseg;
669 669 struct segvn_data *psvd, *nsvd;
670 670 lgrp_mem_policy_t ppolicy, npolicy;
671 671 uint_t lgrp_mem_policy_flags = 0;
672 672 extern lgrp_mem_policy_t lgrp_mem_default_policy;
673 673
674 674 /*
675 675 * Memory policy flags (lgrp_mem_policy_flags) is valid when
676 676 * extending stack/heap segments.
677 677 */
678 678 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
679 679 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
680 680 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
681 681 } else {
682 682 /*
683 683 * Get policy when not extending it from another segment
684 684 */
685 685 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
686 686 }
687 687
688 688 /*
689 689 * First, try to concatenate the previous and new segments
690 690 */
691 691 pseg = AS_SEGPREV(seg->s_as, seg);
692 692 if (pseg != NULL &&
693 693 pseg->s_base + pseg->s_size == seg->s_base &&
694 694 pseg->s_ops == &segvn_ops) {
695 695 /*
696 696 * Get memory allocation policy from previous segment.
697 697 * When extension is specified (e.g. for heap) apply
698 698 * this policy to the new segment regardless of the
699 699 * outcome of segment concatenation. Extension occurs
700 700 * for non-default policy otherwise default policy is
701 701 * used and is based on extended segment size.
702 702 */
703 703 psvd = (struct segvn_data *)pseg->s_data;
704 704 ppolicy = psvd->policy_info.mem_policy;
705 705 if (lgrp_mem_policy_flags ==
706 706 LGRP_MP_FLAG_EXTEND_UP) {
707 707 if (ppolicy != lgrp_mem_default_policy) {
708 708 mpolicy = ppolicy;
709 709 } else {
710 710 mpolicy = lgrp_mem_policy_default(
711 711 pseg->s_size + seg->s_size,
712 712 a->type);
713 713 }
714 714 }
715 715
716 716 if (mpolicy == ppolicy &&
717 717 (pseg->s_size + seg->s_size <=
718 718 segvn_comb_thrshld || psvd->amp == NULL) &&
719 719 segvn_extend_prev(pseg, seg, a, swresv) == 0) {
720 720 /*
721 721 * success! now try to concatenate
722 722 * with following seg
723 723 */
724 724 crfree(cred);
725 725 nseg = AS_SEGNEXT(pseg->s_as, pseg);
726 726 if (nseg != NULL &&
727 727 nseg != pseg &&
728 728 nseg->s_ops == &segvn_ops &&
729 729 pseg->s_base + pseg->s_size ==
730 730 nseg->s_base)
731 731 (void) segvn_concat(pseg, nseg, 0);
732 732 ASSERT(pseg->s_szc == 0 ||
733 733 (a->szc == pseg->s_szc &&
734 734 IS_P2ALIGNED(pseg->s_base, pgsz) &&
735 735 IS_P2ALIGNED(pseg->s_size, pgsz)));
736 736 return (0);
737 737 }
738 738 }
739 739
740 740 /*
741 741 * Failed, so try to concatenate with following seg
742 742 */
743 743 nseg = AS_SEGNEXT(seg->s_as, seg);
744 744 if (nseg != NULL &&
745 745 seg->s_base + seg->s_size == nseg->s_base &&
746 746 nseg->s_ops == &segvn_ops) {
747 747 /*
748 748 * Get memory allocation policy from next segment.
749 749 * When extension is specified (e.g. for stack) apply
750 750 * this policy to the new segment regardless of the
751 751 * outcome of segment concatenation. Extension occurs
752 752 * for non-default policy otherwise default policy is
753 753 * used and is based on extended segment size.
754 754 */
755 755 nsvd = (struct segvn_data *)nseg->s_data;
756 756 npolicy = nsvd->policy_info.mem_policy;
757 757 if (lgrp_mem_policy_flags ==
758 758 LGRP_MP_FLAG_EXTEND_DOWN) {
759 759 if (npolicy != lgrp_mem_default_policy) {
760 760 mpolicy = npolicy;
761 761 } else {
762 762 mpolicy = lgrp_mem_policy_default(
763 763 nseg->s_size + seg->s_size,
764 764 a->type);
765 765 }
766 766 }
767 767
768 768 if (mpolicy == npolicy &&
769 769 segvn_extend_next(seg, nseg, a, swresv) == 0) {
770 770 crfree(cred);
771 771 ASSERT(nseg->s_szc == 0 ||
772 772 (a->szc == nseg->s_szc &&
773 773 IS_P2ALIGNED(nseg->s_base, pgsz) &&
774 774 IS_P2ALIGNED(nseg->s_size, pgsz)));
775 775 return (0);
776 776 }
777 777 }
778 778 }
779 779
780 780 if (a->vp != NULL) {
781 781 VN_HOLD(a->vp);
782 782 if (a->type == MAP_SHARED)
783 783 lgrp_shm_policy_init(NULL, a->vp);
784 784 }
785 785 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
786 786
787 787 seg->s_ops = &segvn_ops;
788 788 seg->s_data = (void *)svd;
789 789 seg->s_szc = a->szc;
790 790
791 791 svd->seg = seg;
792 792 svd->vp = a->vp;
793 793 /*
794 794 * Anonymous mappings have no backing file so the offset is meaningless.
795 795 */
796 796 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
797 797 svd->prot = a->prot;
798 798 svd->maxprot = a->maxprot;
799 799 svd->pageprot = 0;
800 800 svd->type = a->type;
801 801 svd->vpage = NULL;
802 802 svd->cred = cred;
803 803 svd->advice = MADV_NORMAL;
804 804 svd->pageadvice = 0;
805 805 svd->flags = (ushort_t)a->flags;
806 806 svd->softlockcnt = 0;
807 807 svd->softlockcnt_sbase = 0;
808 808 svd->softlockcnt_send = 0;
809 809 svd->rcookie = HAT_INVALID_REGION_COOKIE;
810 810 svd->pageswap = 0;
811 811
812 812 if (a->szc != 0 && a->vp != NULL) {
813 813 segvn_setvnode_mpss(a->vp);
814 814 }
815 815 if (svd->type == MAP_SHARED && svd->vp != NULL &&
816 816 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
817 817 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
818 818 segvn_inval_trcache(svd->vp);
819 819 }
820 820
821 821 amp = a->amp;
822 822 if ((svd->amp = amp) == NULL) {
823 823 svd->anon_index = 0;
824 824 if (svd->type == MAP_SHARED) {
825 825 svd->swresv = 0;
826 826 /*
827 827 * Shared mappings to a vp need no other setup.
828 828 * If we have a shared mapping to an anon_map object
829 829 * which hasn't been allocated yet, allocate the
830 830 * struct now so that it will be properly shared
831 831 * by remembering the swap reservation there.
832 832 */
833 833 if (a->vp == NULL) {
834 834 svd->amp = anonmap_alloc(seg->s_size, swresv,
835 835 ANON_SLEEP);
836 836 svd->amp->a_szc = seg->s_szc;
837 837 }
838 838 } else {
839 839 /*
840 840 * Private mapping (with or without a vp).
841 841 * Allocate anon_map when needed.
842 842 */
843 843 svd->swresv = swresv;
844 844 }
845 845 } else {
846 846 pgcnt_t anon_num;
847 847
848 848 /*
849 849 * Mapping to an existing anon_map structure without a vp.
850 850 * For now we will insure that the segment size isn't larger
851 851 * than the size - offset gives us. Later on we may wish to
852 852 * have the anon array dynamically allocated itself so that
853 853 * we don't always have to allocate all the anon pointer slots.
854 854 * This of course involves adding extra code to check that we
855 855 * aren't trying to use an anon pointer slot beyond the end
856 856 * of the currently allocated anon array.
857 857 */
858 858 if ((amp->size - a->offset) < seg->s_size) {
859 859 panic("segvn_create anon_map size");
860 860 /*NOTREACHED*/
861 861 }
862 862
863 863 anon_num = btopr(a->offset);
864 864
865 865 if (a->type == MAP_SHARED) {
866 866 /*
867 867 * SHARED mapping to a given anon_map.
868 868 */
869 869 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
870 870 amp->refcnt++;
871 871 if (a->szc > amp->a_szc) {
872 872 amp->a_szc = a->szc;
873 873 }
874 874 ANON_LOCK_EXIT(&amp->a_rwlock);
875 875 svd->anon_index = anon_num;
876 876 svd->swresv = 0;
877 877 } else {
878 878 /*
879 879 * PRIVATE mapping to a given anon_map.
880 880 * Make sure that all the needed anon
881 881 * structures are created (so that we will
882 882 * share the underlying pages if nothing
883 883 * is written by this mapping) and then
884 884 * duplicate the anon array as is done
885 885 * when a privately mapped segment is dup'ed.
886 886 */
887 887 struct anon *ap;
888 888 caddr_t addr;
889 889 caddr_t eaddr;
890 890 ulong_t anon_idx;
891 891 int hat_flag = HAT_LOAD;
892 892
893 893 if (svd->flags & MAP_TEXT) {
894 894 hat_flag |= HAT_LOAD_TEXT;
895 895 }
896 896
897 897 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
898 898 svd->amp->a_szc = seg->s_szc;
899 899 svd->anon_index = 0;
900 900 svd->swresv = swresv;
901 901
902 902 /*
903 903 * Prevent 2 threads from allocating anon
904 904 * slots simultaneously.
905 905 */
906 906 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
907 907 eaddr = seg->s_base + seg->s_size;
908 908
909 909 for (anon_idx = anon_num, addr = seg->s_base;
910 910 addr < eaddr; addr += PAGESIZE, anon_idx++) {
911 911 page_t *pp;
912 912
913 913 if ((ap = anon_get_ptr(amp->ahp,
914 914 anon_idx)) != NULL)
915 915 continue;
916 916
917 917 /*
918 918 * Allocate the anon struct now.
919 919 * Might as well load up translation
920 920 * to the page while we're at it...
921 921 */
922 922 pp = anon_zero(seg, addr, &ap, cred);
923 923 if (ap == NULL || pp == NULL) {
924 924 panic("segvn_create anon_zero");
925 925 /*NOTREACHED*/
926 926 }
927 927
928 928 /*
929 929 * Re-acquire the anon_map lock and
930 930 * initialize the anon array entry.
931 931 */
932 932 ASSERT(anon_get_ptr(amp->ahp,
933 933 anon_idx) == NULL);
934 934 (void) anon_set_ptr(amp->ahp, anon_idx, ap,
935 935 ANON_SLEEP);
936 936
937 937 ASSERT(seg->s_szc == 0);
938 938 ASSERT(!IS_VMODSORT(pp->p_vnode));
939 939
940 940 ASSERT(use_rgn == 0);
941 941 hat_memload(seg->s_as->a_hat, addr, pp,
942 942 svd->prot & ~PROT_WRITE, hat_flag);
943 943
944 944 page_unlock(pp);
945 945 }
946 946 ASSERT(seg->s_szc == 0);
947 947 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
948 948 0, seg->s_size);
949 949 ANON_LOCK_EXIT(&amp->a_rwlock);
950 950 }
951 951 }
952 952
953 953 /*
954 954 * Set default memory allocation policy for segment
955 955 *
956 956 * Always set policy for private memory at least for initialization
957 957 * even if this is a shared memory segment
958 958 */
959 959 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
960 960
961 961 if (svd->type == MAP_SHARED)
962 962 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
963 963 svd->vp, svd->offset, seg->s_size);
964 964
965 965 if (use_rgn) {
966 966 ASSERT(!trok);
967 967 ASSERT(svd->amp == NULL);
968 968 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
969 969 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
970 970 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
971 971 HAT_REGION_TEXT);
972 972 }
973 973
974 974 ASSERT(!trok || !(svd->prot & PROT_WRITE));
975 975 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
976 976
977 977 return (0);
978 978 }
979 979
980 980 /*
981 981 * Concatenate two existing segments, if possible.
982 982 * Return 0 on success, -1 if two segments are not compatible
983 983 * or -2 on memory allocation failure.
984 984 * If amp_cat == 1 then try and concat segments with anon maps
985 985 */
986 986 static int
987 987 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
988 988 {
989 989 struct segvn_data *svd1 = seg1->s_data;
990 990 struct segvn_data *svd2 = seg2->s_data;
991 991 struct anon_map *amp1 = svd1->amp;
992 992 struct anon_map *amp2 = svd2->amp;
993 993 struct vpage *vpage1 = svd1->vpage;
994 994 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
995 995 size_t size, nvpsize;
996 996 pgcnt_t npages1, npages2;
997 997
998 998 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
999 999 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1000 1000 ASSERT(seg1->s_ops == seg2->s_ops);
1001 1001
1002 1002 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1003 1003 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1004 1004 return (-1);
1005 1005 }
1006 1006
1007 1007 /* both segments exist, try to merge them */
1008 1008 #define incompat(x) (svd1->x != svd2->x)
1009 1009 if (incompat(vp) || incompat(maxprot) ||
1010 1010 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
1011 1011 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
1012 1012 incompat(type) || incompat(cred) || incompat(flags) ||
1013 1013 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
1014 1014 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
1015 1015 return (-1);
1016 1016 #undef incompat
1017 1017
1018 1018 /*
1019 1019 * vp == NULL implies zfod, offset doesn't matter
1020 1020 */
1021 1021 if (svd1->vp != NULL &&
1022 1022 svd1->offset + seg1->s_size != svd2->offset) {
1023 1023 return (-1);
1024 1024 }
1025 1025
1026 1026 /*
1027 1027 * Don't concatenate if either segment uses text replication.
1028 1028 */
1029 1029 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
1030 1030 return (-1);
1031 1031 }
1032 1032
1033 1033 /*
1034 1034 * Fail early if we're not supposed to concatenate
1035 1035 * segments with non NULL amp.
1036 1036 */
1037 1037 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
1038 1038 return (-1);
1039 1039 }
1040 1040
1041 1041 if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
1042 1042 if (amp1 != amp2) {
1043 1043 return (-1);
1044 1044 }
1045 1045 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
1046 1046 svd2->anon_index) {
1047 1047 return (-1);
1048 1048 }
1049 1049 ASSERT(amp1 == NULL || amp1->refcnt >= 2);
1050 1050 }
1051 1051
1052 1052 /*
1053 1053 * If either seg has vpages, create a new merged vpage array.
1054 1054 */
1055 1055 if (vpage1 != NULL || vpage2 != NULL) {
1056 1056 struct vpage *vp, *evp;
1057 1057
1058 1058 npages1 = seg_pages(seg1);
1059 1059 npages2 = seg_pages(seg2);
1060 1060 nvpsize = vpgtob(npages1 + npages2);
1061 1061
1062 1062 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
1063 1063 return (-2);
1064 1064 }
1065 1065
1066 1066 if (vpage1 != NULL) {
1067 1067 bcopy(vpage1, nvpage, vpgtob(npages1));
1068 1068 } else {
1069 1069 evp = nvpage + npages1;
1070 1070 for (vp = nvpage; vp < evp; vp++) {
1071 1071 VPP_SETPROT(vp, svd1->prot);
1072 1072 VPP_SETADVICE(vp, svd1->advice);
1073 1073 }
1074 1074 }
1075 1075
1076 1076 if (vpage2 != NULL) {
1077 1077 bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
1078 1078 } else {
1079 1079 evp = nvpage + npages1 + npages2;
1080 1080 for (vp = nvpage + npages1; vp < evp; vp++) {
1081 1081 VPP_SETPROT(vp, svd2->prot);
1082 1082 VPP_SETADVICE(vp, svd2->advice);
1083 1083 }
1084 1084 }
1085 1085
1086 1086 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
1087 1087 ASSERT(svd1->swresv == seg1->s_size);
1088 1088 ASSERT(!(svd1->flags & MAP_NORESERVE));
1089 1089 ASSERT(!(svd2->flags & MAP_NORESERVE));
1090 1090 evp = nvpage + npages1;
1091 1091 for (vp = nvpage; vp < evp; vp++) {
1092 1092 VPP_SETSWAPRES(vp);
1093 1093 }
1094 1094 }
1095 1095
1096 1096 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
1097 1097 ASSERT(svd2->swresv == seg2->s_size);
1098 1098 ASSERT(!(svd1->flags & MAP_NORESERVE));
1099 1099 ASSERT(!(svd2->flags & MAP_NORESERVE));
1100 1100 vp = nvpage + npages1;
1101 1101 evp = vp + npages2;
1102 1102 for (; vp < evp; vp++) {
1103 1103 VPP_SETSWAPRES(vp);
1104 1104 }
1105 1105 }
1106 1106 }
1107 1107 ASSERT((vpage1 != NULL || vpage2 != NULL) ||
1108 1108 (svd1->pageswap == 0 && svd2->pageswap == 0));
1109 1109
1110 1110 /*
1111 1111 * If either segment has private pages, create a new merged anon
1112 1112 * array. If merging shared anon segments just decrement anon map's
1113 1113 * refcnt.
1114 1114 */
1115 1115 if (amp1 != NULL && svd1->type == MAP_SHARED) {
1116 1116 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1117 1117 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1118 1118 ASSERT(amp1->refcnt >= 2);
1119 1119 amp1->refcnt--;
1120 1120 ANON_LOCK_EXIT(&amp1->a_rwlock);
1121 1121 svd2->amp = NULL;
1122 1122 } else if (amp1 != NULL || amp2 != NULL) {
1123 1123 struct anon_hdr *nahp;
1124 1124 struct anon_map *namp = NULL;
1125 1125 size_t asize;
1126 1126
1127 1127 ASSERT(svd1->type == MAP_PRIVATE);
1128 1128
1129 1129 asize = seg1->s_size + seg2->s_size;
1130 1130 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1131 1131 if (nvpage != NULL) {
1132 1132 kmem_free(nvpage, nvpsize);
1133 1133 }
1134 1134 return (-2);
1135 1135 }
1136 1136 if (amp1 != NULL) {
1137 1137 /*
1138 1138 * XXX anon rwlock is not really needed because
1139 1139 * this is a private segment and we are writers.
1140 1140 */
1141 1141 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1142 1142 ASSERT(amp1->refcnt == 1);
1143 1143 if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1144 1144 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1145 1145 anon_release(nahp, btop(asize));
1146 1146 ANON_LOCK_EXIT(&amp1->a_rwlock);
1147 1147 if (nvpage != NULL) {
1148 1148 kmem_free(nvpage, nvpsize);
1149 1149 }
1150 1150 return (-2);
1151 1151 }
1152 1152 }
1153 1153 if (amp2 != NULL) {
1154 1154 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1155 1155 ASSERT(amp2->refcnt == 1);
1156 1156 if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1157 1157 nahp, btop(seg1->s_size), btop(seg2->s_size),
1158 1158 ANON_NOSLEEP)) {
1159 1159 anon_release(nahp, btop(asize));
1160 1160 ANON_LOCK_EXIT(&amp2->a_rwlock);
1161 1161 if (amp1 != NULL) {
1162 1162 ANON_LOCK_EXIT(&amp1->a_rwlock);
1163 1163 }
1164 1164 if (nvpage != NULL) {
1165 1165 kmem_free(nvpage, nvpsize);
1166 1166 }
1167 1167 return (-2);
1168 1168 }
1169 1169 }
1170 1170 if (amp1 != NULL) {
1171 1171 namp = amp1;
1172 1172 anon_release(amp1->ahp, btop(amp1->size));
1173 1173 }
1174 1174 if (amp2 != NULL) {
1175 1175 if (namp == NULL) {
1176 1176 ASSERT(amp1 == NULL);
1177 1177 namp = amp2;
1178 1178 anon_release(amp2->ahp, btop(amp2->size));
1179 1179 } else {
1180 1180 amp2->refcnt--;
1181 1181 ANON_LOCK_EXIT(&amp2->a_rwlock);
1182 1182 anonmap_free(amp2);
1183 1183 }
1184 1184 svd2->amp = NULL; /* needed for seg_free */
1185 1185 }
1186 1186 namp->ahp = nahp;
1187 1187 namp->size = asize;
1188 1188 svd1->amp = namp;
1189 1189 svd1->anon_index = 0;
1190 1190 ANON_LOCK_EXIT(&namp->a_rwlock);
1191 1191 }
1192 1192 /*
1193 1193 * Now free the old vpage structures.
1194 1194 */
1195 1195 if (nvpage != NULL) {
1196 1196 if (vpage1 != NULL) {
1197 1197 kmem_free(vpage1, vpgtob(npages1));
1198 1198 }
1199 1199 if (vpage2 != NULL) {
1200 1200 svd2->vpage = NULL;
1201 1201 kmem_free(vpage2, vpgtob(npages2));
1202 1202 }
1203 1203 if (svd2->pageprot) {
1204 1204 svd1->pageprot = 1;
1205 1205 }
1206 1206 if (svd2->pageadvice) {
1207 1207 svd1->pageadvice = 1;
1208 1208 }
1209 1209 if (svd2->pageswap) {
1210 1210 svd1->pageswap = 1;
1211 1211 }
1212 1212 svd1->vpage = nvpage;
1213 1213 }
1214 1214
1215 1215 /* all looks ok, merge segments */
1216 1216 svd1->swresv += svd2->swresv;
1217 1217 svd2->swresv = 0; /* so seg_free doesn't release swap space */
1218 1218 size = seg2->s_size;
1219 1219 seg_free(seg2);
1220 1220 seg1->s_size += size;
1221 1221 return (0);
1222 1222 }
1223 1223
1224 1224 /*
1225 1225 * Extend the previous segment (seg1) to include the
1226 1226 * new segment (seg2 + a), if possible.
1227 1227 * Return 0 on success.
1228 1228 */
1229 1229 static int
1230 1230 segvn_extend_prev(seg1, seg2, a, swresv)
1231 1231 struct seg *seg1, *seg2;
1232 1232 struct segvn_crargs *a;
1233 1233 size_t swresv;
1234 1234 {
1235 1235 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1236 1236 size_t size;
1237 1237 struct anon_map *amp1;
1238 1238 struct vpage *new_vpage;
1239 1239
1240 1240 /*
1241 1241 * We don't need any segment level locks for "segvn" data
1242 1242 * since the address space is "write" locked.
1243 1243 */
1244 1244 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1245 1245
1246 1246 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1247 1247 return (-1);
1248 1248 }
1249 1249
1250 1250 /* second segment is new, try to extend first */
1251 1251 /* XXX - should also check cred */
1252 1252 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1253 1253 (!svd1->pageprot && (svd1->prot != a->prot)) ||
1254 1254 svd1->type != a->type || svd1->flags != a->flags ||
1255 1255 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1256 1256 return (-1);
1257 1257
1258 1258 /* vp == NULL implies zfod, offset doesn't matter */
1259 1259 if (svd1->vp != NULL &&
1260 1260 svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1261 1261 return (-1);
1262 1262
1263 1263 if (svd1->tr_state != SEGVN_TR_OFF) {
1264 1264 return (-1);
1265 1265 }
1266 1266
1267 1267 amp1 = svd1->amp;
1268 1268 if (amp1) {
1269 1269 pgcnt_t newpgs;
1270 1270
1271 1271 /*
1272 1272 * Segment has private pages, can data structures
1273 1273 * be expanded?
1274 1274 *
1275 1275 * Acquire the anon_map lock to prevent it from changing,
1276 1276 * if it is shared. This ensures that the anon_map
1277 1277 * will not change while a thread which has a read/write
1278 1278 * lock on an address space references it.
1279 1279 * XXX - Don't need the anon_map lock at all if "refcnt"
1280 1280 * is 1.
1281 1281 *
1282 1282 * Can't grow a MAP_SHARED segment with an anonmap because
1283 1283 * there may be existing anon slots where we want to extend
1284 1284 * the segment and we wouldn't know what to do with them
1285 1285 * (e.g., for tmpfs right thing is to just leave them there,
1286 1286 * for /dev/zero they should be cleared out).
1287 1287 */
1288 1288 if (svd1->type == MAP_SHARED)
1289 1289 return (-1);
1290 1290
1291 1291 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1292 1292 if (amp1->refcnt > 1) {
1293 1293 ANON_LOCK_EXIT(&amp1->a_rwlock);
1294 1294 return (-1);
1295 1295 }
1296 1296 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1297 1297 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1298 1298
1299 1299 if (newpgs == 0) {
1300 1300 ANON_LOCK_EXIT(&amp1->a_rwlock);
1301 1301 return (-1);
1302 1302 }
1303 1303 amp1->size = ptob(newpgs);
1304 1304 ANON_LOCK_EXIT(&amp1->a_rwlock);
1305 1305 }
1306 1306 if (svd1->vpage != NULL) {
1307 1307 struct vpage *vp, *evp;
1308 1308 new_vpage =
1309 1309 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1310 1310 KM_NOSLEEP);
1311 1311 if (new_vpage == NULL)
1312 1312 return (-1);
1313 1313 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1314 1314 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1315 1315 svd1->vpage = new_vpage;
1316 1316
1317 1317 vp = new_vpage + seg_pages(seg1);
1318 1318 evp = vp + seg_pages(seg2);
1319 1319 for (; vp < evp; vp++)
1320 1320 VPP_SETPROT(vp, a->prot);
1321 1321 if (svd1->pageswap && swresv) {
1322 1322 ASSERT(!(svd1->flags & MAP_NORESERVE));
1323 1323 ASSERT(swresv == seg2->s_size);
1324 1324 vp = new_vpage + seg_pages(seg1);
1325 1325 for (; vp < evp; vp++) {
1326 1326 VPP_SETSWAPRES(vp);
1327 1327 }
1328 1328 }
1329 1329 }
1330 1330 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1331 1331 size = seg2->s_size;
1332 1332 seg_free(seg2);
1333 1333 seg1->s_size += size;
1334 1334 svd1->swresv += swresv;
1335 1335 if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1336 1336 svd1->type == MAP_SHARED && svd1->vp != NULL &&
1337 1337 (svd1->vp->v_flag & VVMEXEC)) {
1338 1338 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1339 1339 segvn_inval_trcache(svd1->vp);
1340 1340 }
1341 1341 return (0);
1342 1342 }
1343 1343
1344 1344 /*
1345 1345 * Extend the next segment (seg2) to include the
1346 1346 * new segment (seg1 + a), if possible.
1347 1347 * Return 0 on success.
1348 1348 */
1349 1349 static int
1350 1350 segvn_extend_next(
1351 1351 struct seg *seg1,
1352 1352 struct seg *seg2,
1353 1353 struct segvn_crargs *a,
1354 1354 size_t swresv)
1355 1355 {
1356 1356 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1357 1357 size_t size;
1358 1358 struct anon_map *amp2;
1359 1359 struct vpage *new_vpage;
1360 1360
1361 1361 /*
1362 1362 * We don't need any segment level locks for "segvn" data
1363 1363 * since the address space is "write" locked.
1364 1364 */
1365 1365 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
1366 1366
1367 1367 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1368 1368 return (-1);
1369 1369 }
1370 1370
1371 1371 /* first segment is new, try to extend second */
1372 1372 /* XXX - should also check cred */
1373 1373 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1374 1374 (!svd2->pageprot && (svd2->prot != a->prot)) ||
1375 1375 svd2->type != a->type || svd2->flags != a->flags ||
1376 1376 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1377 1377 return (-1);
1378 1378 /* vp == NULL implies zfod, offset doesn't matter */
1379 1379 if (svd2->vp != NULL &&
1380 1380 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1381 1381 return (-1);
1382 1382
1383 1383 if (svd2->tr_state != SEGVN_TR_OFF) {
1384 1384 return (-1);
1385 1385 }
1386 1386
1387 1387 amp2 = svd2->amp;
1388 1388 if (amp2) {
1389 1389 pgcnt_t newpgs;
1390 1390
1391 1391 /*
1392 1392 * Segment has private pages, can data structures
1393 1393 * be expanded?
1394 1394 *
1395 1395 * Acquire the anon_map lock to prevent it from changing,
1396 1396 * if it is shared. This ensures that the anon_map
1397 1397 * will not change while a thread which has a read/write
1398 1398 * lock on an address space references it.
1399 1399 *
1400 1400 * XXX - Don't need the anon_map lock at all if "refcnt"
1401 1401 * is 1.
1402 1402 */
1403 1403 if (svd2->type == MAP_SHARED)
1404 1404 return (-1);
1405 1405
1406 1406 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1407 1407 if (amp2->refcnt > 1) {
1408 1408 ANON_LOCK_EXIT(&amp2->a_rwlock);
1409 1409 return (-1);
1410 1410 }
1411 1411 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1412 1412 btop(seg2->s_size), btop(seg1->s_size),
1413 1413 ANON_NOSLEEP | ANON_GROWDOWN);
1414 1414
1415 1415 if (newpgs == 0) {
1416 1416 ANON_LOCK_EXIT(&amp2->a_rwlock);
1417 1417 return (-1);
1418 1418 }
1419 1419 amp2->size = ptob(newpgs);
1420 1420 ANON_LOCK_EXIT(&amp2->a_rwlock);
1421 1421 }
1422 1422 if (svd2->vpage != NULL) {
1423 1423 struct vpage *vp, *evp;
1424 1424 new_vpage =
1425 1425 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1426 1426 KM_NOSLEEP);
1427 1427 if (new_vpage == NULL) {
1428 1428 /* Not merging segments so adjust anon_index back */
1429 1429 if (amp2)
1430 1430 svd2->anon_index += seg_pages(seg1);
1431 1431 return (-1);
1432 1432 }
1433 1433 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1434 1434 vpgtob(seg_pages(seg2)));
1435 1435 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1436 1436 svd2->vpage = new_vpage;
1437 1437
1438 1438 vp = new_vpage;
1439 1439 evp = vp + seg_pages(seg1);
1440 1440 for (; vp < evp; vp++)
1441 1441 VPP_SETPROT(vp, a->prot);
1442 1442 if (svd2->pageswap && swresv) {
1443 1443 ASSERT(!(svd2->flags & MAP_NORESERVE));
1444 1444 ASSERT(swresv == seg1->s_size);
1445 1445 vp = new_vpage;
1446 1446 for (; vp < evp; vp++) {
1447 1447 VPP_SETSWAPRES(vp);
1448 1448 }
1449 1449 }
1450 1450 }
1451 1451 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1452 1452 size = seg1->s_size;
1453 1453 seg_free(seg1);
1454 1454 seg2->s_size += size;
1455 1455 seg2->s_base -= size;
1456 1456 svd2->offset -= size;
1457 1457 svd2->swresv += swresv;
1458 1458 if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1459 1459 svd2->type == MAP_SHARED && svd2->vp != NULL &&
1460 1460 (svd2->vp->v_flag & VVMEXEC)) {
1461 1461 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1462 1462 segvn_inval_trcache(svd2->vp);
1463 1463 }
1464 1464 return (0);
1465 1465 }
1466 1466
1467 1467 static int
1468 1468 segvn_dup(struct seg *seg, struct seg *newseg)
1469 1469 {
1470 1470 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1471 1471 struct segvn_data *newsvd;
1472 1472 pgcnt_t npages = seg_pages(seg);
1473 1473 int error = 0;
1474 1474 uint_t prot;
1475 1475 size_t len;
1476 1476 struct anon_map *amp;
1477 1477
1478 1478 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1479 1479 ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1480 1480
1481 1481 /*
1482 1482 * If segment has anon reserved, reserve more for the new seg.
1483 1483 * For a MAP_NORESERVE segment swresv will be a count of all the
1484 1484 * allocated anon slots; thus we reserve for the child as many slots
1485 1485 * as the parent has allocated. This semantic prevents the child or
1486 1486 * parent from dying during a copy-on-write fault caused by trying
1487 1487 * to write a shared pre-existing anon page.
1488 1488 */
1489 1489 if ((len = svd->swresv) != 0) {
1490 1490 if (anon_resv(svd->swresv) == 0)
1491 1491 return (ENOMEM);
1492 1492
1493 1493 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1494 1494 seg, len, 0);
1495 1495 }
1496 1496
1497 1497 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1498 1498
1499 1499 newseg->s_ops = &segvn_ops;
1500 1500 newseg->s_data = (void *)newsvd;
1501 1501 newseg->s_szc = seg->s_szc;
1502 1502
1503 1503 newsvd->seg = newseg;
1504 1504 if ((newsvd->vp = svd->vp) != NULL) {
1505 1505 VN_HOLD(svd->vp);
1506 1506 if (svd->type == MAP_SHARED)
1507 1507 lgrp_shm_policy_init(NULL, svd->vp);
1508 1508 }
1509 1509 newsvd->offset = svd->offset;
1510 1510 newsvd->prot = svd->prot;
1511 1511 newsvd->maxprot = svd->maxprot;
1512 1512 newsvd->pageprot = svd->pageprot;
1513 1513 newsvd->type = svd->type;
1514 1514 newsvd->cred = svd->cred;
1515 1515 crhold(newsvd->cred);
1516 1516 newsvd->advice = svd->advice;
1517 1517 newsvd->pageadvice = svd->pageadvice;
1518 1518 newsvd->swresv = svd->swresv;
1519 1519 newsvd->pageswap = svd->pageswap;
1520 1520 newsvd->flags = svd->flags;
1521 1521 newsvd->softlockcnt = 0;
1522 1522 newsvd->softlockcnt_sbase = 0;
1523 1523 newsvd->softlockcnt_send = 0;
1524 1524 newsvd->policy_info = svd->policy_info;
1525 1525 newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1526 1526
1527 1527 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1528 1528 /*
1529 1529 * Not attaching to a shared anon object.
1530 1530 */
1531 1531 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1532 1532 svd->tr_state == SEGVN_TR_OFF);
1533 1533 if (svd->tr_state == SEGVN_TR_ON) {
1534 1534 ASSERT(newsvd->vp != NULL && amp != NULL);
1535 1535 newsvd->tr_state = SEGVN_TR_INIT;
1536 1536 } else {
1537 1537 newsvd->tr_state = svd->tr_state;
1538 1538 }
1539 1539 newsvd->amp = NULL;
1540 1540 newsvd->anon_index = 0;
1541 1541 } else {
1542 1542 /* regions for now are only used on pure vnode segments */
1543 1543 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1544 1544 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1545 1545 newsvd->tr_state = SEGVN_TR_OFF;
1546 1546 if (svd->type == MAP_SHARED) {
1547 1547 newsvd->amp = amp;
1548 1548 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1549 1549 amp->refcnt++;
1550 1550 ANON_LOCK_EXIT(&amp->a_rwlock);
1551 1551 newsvd->anon_index = svd->anon_index;
1552 1552 } else {
1553 1553 int reclaim = 1;
1554 1554
1555 1555 /*
1556 1556 * Allocate and initialize new anon_map structure.
1557 1557 */
1558 1558 newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1559 1559 ANON_SLEEP);
1560 1560 newsvd->amp->a_szc = newseg->s_szc;
1561 1561 newsvd->anon_index = 0;
1562 1562
1563 1563 /*
1564 1564 * We don't have to acquire the anon_map lock
1565 1565 * for the new segment (since it belongs to an
1566 1566 * address space that is still not associated
1567 1567 * with any process), or the segment in the old
1568 1568 * address space (since all threads in it
1569 1569 * are stopped while duplicating the address space).
1570 1570 */
1571 1571
1572 1572 /*
1573 1573 * The goal of the following code is to make sure that
1574 1574 * softlocked pages do not end up as copy on write
1575 1575 * pages. This would cause problems where one
1576 1576 * thread writes to a page that is COW and a different
1577 1577 * thread in the same process has softlocked it. The
1578 1578 * softlock lock would move away from this process
1579 1579 * because the write would cause this process to get
1580 1580 * a copy (without the softlock).
1581 1581 *
1582 1582 * The strategy here is to just break the
1583 1583 * sharing on pages that could possibly be
1584 1584 * softlocked.
1585 1585 */
1586 1586 retry:
1587 1587 if (svd->softlockcnt) {
1588 1588 struct anon *ap, *newap;
1589 1589 size_t i;
1590 1590 uint_t vpprot;
1591 1591 page_t *anon_pl[1+1], *pp;
1592 1592 caddr_t addr;
1593 1593 ulong_t old_idx = svd->anon_index;
1594 1594 ulong_t new_idx = 0;
1595 1595
1596 1596 /*
1597 1597 * The softlock count might be non zero
1598 1598 * because some pages are still stuck in the
1599 1599 * cache for lazy reclaim. Flush the cache
1600 1600 * now. This should drop the count to zero.
1601 1601 * [or there is really I/O going on to these
1602 1602 * pages]. Note, we have the writers lock so
1603 1603 * nothing gets inserted during the flush.
1604 1604 */
1605 1605 if (reclaim == 1) {
1606 1606 segvn_purge(seg);
1607 1607 reclaim = 0;
1608 1608 goto retry;
1609 1609 }
1610 1610 i = btopr(seg->s_size);
1611 1611 addr = seg->s_base;
1612 1612 /*
1613 1613 * XXX break cow sharing using PAGESIZE
1614 1614 * pages. They will be relocated into larger
1615 1615 * pages at fault time.
1616 1616 */
1617 1617 while (i-- > 0) {
1618 1618 if (ap = anon_get_ptr(amp->ahp,
1619 1619 old_idx)) {
1620 1620 error = anon_getpage(&ap,
1621 1621 &vpprot, anon_pl, PAGESIZE,
1622 1622 seg, addr, S_READ,
1623 1623 svd->cred);
1624 1624 if (error) {
1625 1625 newsvd->vpage = NULL;
1626 1626 goto out;
1627 1627 }
1628 1628 /*
1629 1629 * prot need not be computed
1630 1630 * below 'cause anon_private is
1631 1631 * going to ignore it anyway
1632 1632 * as child doesn't inherit
1633 1633 * pagelock from parent.
1634 1634 */
1635 1635 prot = svd->pageprot ?
1636 1636 VPP_PROT(
1637 1637 &svd->vpage[
1638 1638 seg_page(seg, addr)])
1639 1639 : svd->prot;
1640 1640 pp = anon_private(&newap,
1641 1641 newseg, addr, prot,
1642 1642 anon_pl[0], 0,
1643 1643 newsvd->cred);
1644 1644 if (pp == NULL) {
1645 1645 /* no mem abort */
1646 1646 newsvd->vpage = NULL;
1647 1647 error = ENOMEM;
1648 1648 goto out;
1649 1649 }
1650 1650 (void) anon_set_ptr(
1651 1651 newsvd->amp->ahp, new_idx,
1652 1652 newap, ANON_SLEEP);
1653 1653 page_unlock(pp);
1654 1654 }
1655 1655 addr += PAGESIZE;
1656 1656 old_idx++;
1657 1657 new_idx++;
1658 1658 }
1659 1659 } else { /* common case */
1660 1660 if (seg->s_szc != 0) {
1661 1661 /*
1662 1662 * If at least one of anon slots of a
1663 1663 * large page exists then make sure
1664 1664 * all anon slots of a large page
1665 1665 * exist to avoid partial cow sharing
1666 1666 * of a large page in the future.
1667 1667 */
1668 1668 anon_dup_fill_holes(amp->ahp,
1669 1669 svd->anon_index, newsvd->amp->ahp,
1670 1670 0, seg->s_size, seg->s_szc,
1671 1671 svd->vp != NULL);
1672 1672 } else {
1673 1673 anon_dup(amp->ahp, svd->anon_index,
1674 1674 newsvd->amp->ahp, 0, seg->s_size);
1675 1675 }
1676 1676
1677 1677 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1678 1678 seg->s_size, PROT_WRITE);
1679 1679 }
1680 1680 }
1681 1681 }
1682 1682 /*
1683 1683 * If necessary, create a vpage structure for the new segment.
1684 1684 * Do not copy any page lock indications.
1685 1685 */
1686 1686 if (svd->vpage != NULL) {
1687 1687 uint_t i;
1688 1688 struct vpage *ovp = svd->vpage;
1689 1689 struct vpage *nvp;
1690 1690
1691 1691 nvp = newsvd->vpage =
1692 1692 kmem_alloc(vpgtob(npages), KM_SLEEP);
1693 1693 for (i = 0; i < npages; i++) {
1694 1694 *nvp = *ovp++;
1695 1695 VPP_CLRPPLOCK(nvp++);
1696 1696 }
1697 1697 } else
1698 1698 newsvd->vpage = NULL;
1699 1699
1700 1700 /* Inform the vnode of the new mapping */
1701 1701 if (newsvd->vp != NULL) {
1702 1702 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1703 1703 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1704 1704 newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1705 1705 }
1706 1706 out:
1707 1707 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1708 1708 ASSERT(newsvd->amp == NULL);
1709 1709 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1710 1710 newsvd->rcookie = svd->rcookie;
1711 1711 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1712 1712 }
1713 1713 return (error);
1714 1714 }
1715 1715
1716 1716
1717 1717 /*
1718 1718 * callback function to invoke free_vp_pages() for only those pages actually
1719 1719 * processed by the HAT when a shared region is destroyed.
1720 1720 */
1721 1721 extern int free_pages;
1722 1722
1723 1723 static void
1724 1724 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1725 1725 size_t r_size, void *r_obj, u_offset_t r_objoff)
1726 1726 {
1727 1727 u_offset_t off;
1728 1728 size_t len;
1729 1729 vnode_t *vp = (vnode_t *)r_obj;
1730 1730
1731 1731 ASSERT(eaddr > saddr);
1732 1732 ASSERT(saddr >= r_saddr);
1733 1733 ASSERT(saddr < r_saddr + r_size);
1734 1734 ASSERT(eaddr > r_saddr);
1735 1735 ASSERT(eaddr <= r_saddr + r_size);
1736 1736 ASSERT(vp != NULL);
1737 1737
1738 1738 if (!free_pages) {
1739 1739 return;
1740 1740 }
1741 1741
1742 1742 len = eaddr - saddr;
1743 1743 off = (saddr - r_saddr) + r_objoff;
1744 1744 free_vp_pages(vp, off, len);
1745 1745 }
1746 1746
1747 1747 /*
1748 1748 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1749 1749 * those pages actually processed by the HAT
1750 1750 */
1751 1751 static void
1752 1752 segvn_hat_unload_callback(hat_callback_t *cb)
1753 1753 {
1754 1754 struct seg *seg = cb->hcb_data;
1755 1755 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1756 1756 size_t len;
1757 1757 u_offset_t off;
1758 1758
1759 1759 ASSERT(svd->vp != NULL);
1760 1760 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1761 1761 ASSERT(cb->hcb_start_addr >= seg->s_base);
1762 1762
1763 1763 len = cb->hcb_end_addr - cb->hcb_start_addr;
1764 1764 off = cb->hcb_start_addr - seg->s_base;
1765 1765 free_vp_pages(svd->vp, svd->offset + off, len);
1766 1766 }
1767 1767
1768 1768 /*
1769 1769 * This function determines the number of bytes of swap reserved by
1770 1770 * a segment for which per-page accounting is present. It is used to
1771 1771 * calculate the correct value of a segvn_data's swresv.
1772 1772 */
1773 1773 static size_t
1774 1774 segvn_count_swap_by_vpages(struct seg *seg)
1775 1775 {
1776 1776 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1777 1777 struct vpage *vp, *evp;
1778 1778 size_t nswappages = 0;
1779 1779
1780 1780 ASSERT(svd->pageswap);
1781 1781 ASSERT(svd->vpage != NULL);
1782 1782
1783 1783 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1784 1784
1785 1785 for (vp = svd->vpage; vp < evp; vp++) {
1786 1786 if (VPP_ISSWAPRES(vp))
1787 1787 nswappages++;
1788 1788 }
1789 1789
1790 1790 return (nswappages << PAGESHIFT);
1791 1791 }
1792 1792
1793 1793 static int
1794 1794 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1795 1795 {
1796 1796 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1797 1797 struct segvn_data *nsvd;
1798 1798 struct seg *nseg;
1799 1799 struct anon_map *amp;
1800 1800 pgcnt_t opages; /* old segment size in pages */
1801 1801 pgcnt_t npages; /* new segment size in pages */
1802 1802 pgcnt_t dpages; /* pages being deleted (unmapped) */
1803 1803 hat_callback_t callback; /* used for free_vp_pages() */
1804 1804 hat_callback_t *cbp = NULL;
1805 1805 caddr_t nbase;
1806 1806 size_t nsize;
1807 1807 size_t oswresv;
1808 1808 int reclaim = 1;
1809 1809
1810 1810 /*
1811 1811 * We don't need any segment level locks for "segvn" data
1812 1812 * since the address space is "write" locked.
1813 1813 */
1814 1814 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1815 1815
1816 1816 /*
1817 1817 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1818 1818 * softlockcnt is protected from change by the as write lock.
1819 1819 */
1820 1820 retry:
1821 1821 if (svd->softlockcnt > 0) {
1822 1822 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1823 1823
1824 1824 /*
1825 1825 		 * If this is a shared segment, a non-zero softlockcnt
1826 1826 		 * means that locked pages are still in use.
1827 1827 */
1828 1828 if (svd->type == MAP_SHARED) {
1829 1829 return (EAGAIN);
1830 1830 }
1831 1831
1832 1832 /*
1833 1833 * since we do have the writers lock nobody can fill
1834 1834 * the cache during the purge. The flush either succeeds
1835 1835 * or we still have pending I/Os.
1836 1836 */
1837 1837 if (reclaim == 1) {
1838 1838 segvn_purge(seg);
1839 1839 reclaim = 0;
1840 1840 goto retry;
1841 1841 }
1842 1842 return (EAGAIN);
1843 1843 }
1844 1844
1845 1845 /*
1846 1846 * Check for bad sizes
1847 1847 */
1848 1848 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1849 1849 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1850 1850 panic("segvn_unmap");
1851 1851 /*NOTREACHED*/
1852 1852 }
1853 1853
1854 1854 if (seg->s_szc != 0) {
1855 1855 size_t pgsz = page_get_pagesize(seg->s_szc);
1856 1856 int err;
1857 1857 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1858 1858 ASSERT(seg->s_base != addr || seg->s_size != len);
1859 1859 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1860 1860 ASSERT(svd->amp == NULL);
1861 1861 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1862 1862 hat_leave_region(seg->s_as->a_hat,
1863 1863 svd->rcookie, HAT_REGION_TEXT);
1864 1864 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1865 1865 /*
1866 1866 * could pass a flag to segvn_demote_range()
1867 1867 * below to tell it not to do any unloads but
1868 1868 * this case is rare enough to not bother for
1869 1869 * now.
1870 1870 */
1871 1871 } else if (svd->tr_state == SEGVN_TR_INIT) {
1872 1872 svd->tr_state = SEGVN_TR_OFF;
1873 1873 } else if (svd->tr_state == SEGVN_TR_ON) {
1874 1874 ASSERT(svd->amp != NULL);
1875 1875 segvn_textunrepl(seg, 1);
1876 1876 ASSERT(svd->amp == NULL);
1877 1877 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1878 1878 }
1879 1879 VM_STAT_ADD(segvnvmstats.demoterange[0]);
1880 1880 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1881 1881 if (err == 0) {
1882 1882 return (IE_RETRY);
1883 1883 }
1884 1884 return (err);
1885 1885 }
1886 1886 }
1887 1887
1888 1888 /* Inform the vnode of the unmapping. */
1889 1889 if (svd->vp) {
1890 1890 int error;
1891 1891
1892 1892 error = VOP_DELMAP(svd->vp,
1893 1893 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1894 1894 seg->s_as, addr, len, svd->prot, svd->maxprot,
1895 1895 svd->type, svd->cred, NULL);
1896 1896
1897 1897 if (error == EAGAIN)
1898 1898 return (error);
1899 1899 }
1900 1900
1901 1901 /*
1902 1902 * Remove any page locks set through this mapping.
1903 1903 * If text replication is not off no page locks could have been
1904 1904 * established via this mapping.
1905 1905 */
1906 1906 if (svd->tr_state == SEGVN_TR_OFF) {
1907 1907 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1908 1908 }
1909 1909
1910 1910 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1911 1911 ASSERT(svd->amp == NULL);
1912 1912 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1913 1913 ASSERT(svd->type == MAP_PRIVATE);
1914 1914 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1915 1915 HAT_REGION_TEXT);
1916 1916 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1917 1917 } else if (svd->tr_state == SEGVN_TR_ON) {
1918 1918 ASSERT(svd->amp != NULL);
1919 1919 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1920 1920 segvn_textunrepl(seg, 1);
1921 1921 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1922 1922 } else {
1923 1923 if (svd->tr_state != SEGVN_TR_OFF) {
1924 1924 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1925 1925 svd->tr_state = SEGVN_TR_OFF;
1926 1926 }
1927 1927 /*
1928 1928 * Unload any hardware translations in the range to be taken
1929 1929 * out. Use a callback to invoke free_vp_pages() effectively.
1930 1930 */
1931 1931 if (svd->vp != NULL && free_pages != 0) {
1932 1932 callback.hcb_data = seg;
1933 1933 callback.hcb_function = segvn_hat_unload_callback;
1934 1934 cbp = &callback;
1935 1935 }
1936 1936 hat_unload_callback(seg->s_as->a_hat, addr, len,
1937 1937 HAT_UNLOAD_UNMAP, cbp);
1938 1938
1939 1939 if (svd->type == MAP_SHARED && svd->vp != NULL &&
1940 1940 (svd->vp->v_flag & VVMEXEC) &&
1941 1941 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
1942 1942 segvn_inval_trcache(svd->vp);
1943 1943 }
1944 1944 }
1945 1945
1946 1946 /*
1947 1947 * Check for entire segment
1948 1948 */
1949 1949 if (addr == seg->s_base && len == seg->s_size) {
1950 1950 seg_free(seg);
1951 1951 return (0);
1952 1952 }
1953 1953
1954 1954 opages = seg_pages(seg);
1955 1955 dpages = btop(len);
1956 1956 npages = opages - dpages;
1957 1957 amp = svd->amp;
1958 1958 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
1959 1959
1960 1960 /*
1961 1961 * Check for beginning of segment
1962 1962 */
1963 1963 if (addr == seg->s_base) {
1964 1964 if (svd->vpage != NULL) {
1965 1965 size_t nbytes;
1966 1966 struct vpage *ovpage;
1967 1967
1968 1968 ovpage = svd->vpage; /* keep pointer to vpage */
1969 1969
1970 1970 nbytes = vpgtob(npages);
1971 1971 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
1972 1972 bcopy(&ovpage[dpages], svd->vpage, nbytes);
1973 1973
1974 1974 /* free up old vpage */
1975 1975 kmem_free(ovpage, vpgtob(opages));
1976 1976 }
1977 1977 if (amp != NULL) {
1978 1978 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1979 1979 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
1980 1980 /*
1981 1981 * Shared anon map is no longer in use. Before
1982 1982 * freeing its pages purge all entries from
1983 1983 * pcache that belong to this amp.
1984 1984 */
1985 1985 if (svd->type == MAP_SHARED) {
1986 1986 ASSERT(amp->refcnt == 1);
1987 1987 ASSERT(svd->softlockcnt == 0);
1988 1988 anonmap_purge(amp);
1989 1989 }
1990 1990 /*
1991 1991 * Free up now unused parts of anon_map array.
1992 1992 */
1993 1993 if (amp->a_szc == seg->s_szc) {
1994 1994 if (seg->s_szc != 0) {
1995 1995 anon_free_pages(amp->ahp,
1996 1996 svd->anon_index, len,
1997 1997 seg->s_szc);
1998 1998 } else {
1999 1999 anon_free(amp->ahp,
2000 2000 svd->anon_index,
2001 2001 len);
2002 2002 }
2003 2003 } else {
2004 2004 ASSERT(svd->type == MAP_SHARED);
2005 2005 ASSERT(amp->a_szc > seg->s_szc);
2006 2006 anon_shmap_free_pages(amp,
2007 2007 svd->anon_index, len);
2008 2008 }
2009 2009
2010 2010 /*
2011 2011 * Unreserve swap space for the
2012 2012 * unmapped chunk of this segment in
2013 2013 * case it's MAP_SHARED
2014 2014 */
2015 2015 if (svd->type == MAP_SHARED) {
2016 2016 anon_unresv_zone(len,
2017 2017 seg->s_as->a_proc->p_zone);
2018 2018 amp->swresv -= len;
2019 2019 }
2020 2020 }
2021 2021 			ANON_LOCK_EXIT(&amp->a_rwlock);
2022 2022 svd->anon_index += dpages;
2023 2023 }
2024 2024 if (svd->vp != NULL)
2025 2025 svd->offset += len;
2026 2026
2027 2027 seg->s_base += len;
2028 2028 seg->s_size -= len;
2029 2029
2030 2030 if (svd->swresv) {
2031 2031 if (svd->flags & MAP_NORESERVE) {
2032 2032 ASSERT(amp);
2033 2033 oswresv = svd->swresv;
2034 2034
2035 2035 svd->swresv = ptob(anon_pages(amp->ahp,
2036 2036 svd->anon_index, npages));
2037 2037 anon_unresv_zone(oswresv - svd->swresv,
2038 2038 seg->s_as->a_proc->p_zone);
2039 2039 if (SEG_IS_PARTIAL_RESV(seg))
2040 2040 seg->s_as->a_resvsize -= oswresv -
2041 2041 svd->swresv;
2042 2042 } else {
2043 2043 size_t unlen;
2044 2044
2045 2045 if (svd->pageswap) {
2046 2046 oswresv = svd->swresv;
2047 2047 svd->swresv =
2048 2048 segvn_count_swap_by_vpages(seg);
2049 2049 ASSERT(oswresv >= svd->swresv);
2050 2050 unlen = oswresv - svd->swresv;
2051 2051 } else {
2052 2052 svd->swresv -= len;
2053 2053 ASSERT(svd->swresv == seg->s_size);
2054 2054 unlen = len;
2055 2055 }
2056 2056 anon_unresv_zone(unlen,
2057 2057 seg->s_as->a_proc->p_zone);
2058 2058 }
2059 2059 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2060 2060 seg, len, 0);
2061 2061 }
2062 2062
2063 2063 return (0);
2064 2064 }
2065 2065
2066 2066 /*
2067 2067 * Check for end of segment
2068 2068 */
2069 2069 if (addr + len == seg->s_base + seg->s_size) {
2070 2070 if (svd->vpage != NULL) {
2071 2071 size_t nbytes;
2072 2072 struct vpage *ovpage;
2073 2073
2074 2074 ovpage = svd->vpage; /* keep pointer to vpage */
2075 2075
2076 2076 nbytes = vpgtob(npages);
2077 2077 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2078 2078 bcopy(ovpage, svd->vpage, nbytes);
2079 2079
2080 2080 /* free up old vpage */
2081 2081 kmem_free(ovpage, vpgtob(opages));
2082 2082
2083 2083 }
2084 2084 if (amp != NULL) {
2085 2085 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2086 2086 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2087 2087 /*
2088 2088 * Free up now unused parts of anon_map array.
2089 2089 */
2090 2090 ulong_t an_idx = svd->anon_index + npages;
2091 2091
2092 2092 /*
2093 2093 * Shared anon map is no longer in use. Before
2094 2094 * freeing its pages purge all entries from
2095 2095 * pcache that belong to this amp.
2096 2096 */
2097 2097 if (svd->type == MAP_SHARED) {
2098 2098 ASSERT(amp->refcnt == 1);
2099 2099 ASSERT(svd->softlockcnt == 0);
2100 2100 anonmap_purge(amp);
2101 2101 }
2102 2102
2103 2103 if (amp->a_szc == seg->s_szc) {
2104 2104 if (seg->s_szc != 0) {
2105 2105 anon_free_pages(amp->ahp,
2106 2106 an_idx, len,
2107 2107 seg->s_szc);
2108 2108 } else {
2109 2109 anon_free(amp->ahp, an_idx,
2110 2110 len);
2111 2111 }
2112 2112 } else {
2113 2113 ASSERT(svd->type == MAP_SHARED);
2114 2114 ASSERT(amp->a_szc > seg->s_szc);
2115 2115 anon_shmap_free_pages(amp,
2116 2116 an_idx, len);
2117 2117 }
2118 2118
2119 2119 /*
2120 2120 * Unreserve swap space for the
2121 2121 * unmapped chunk of this segment in
2122 2122 * case it's MAP_SHARED
2123 2123 */
2124 2124 if (svd->type == MAP_SHARED) {
2125 2125 anon_unresv_zone(len,
2126 2126 seg->s_as->a_proc->p_zone);
2127 2127 amp->swresv -= len;
2128 2128 }
2129 2129 }
2130 2130 			ANON_LOCK_EXIT(&amp->a_rwlock);
2131 2131 }
2132 2132
2133 2133 seg->s_size -= len;
2134 2134
2135 2135 if (svd->swresv) {
2136 2136 if (svd->flags & MAP_NORESERVE) {
2137 2137 ASSERT(amp);
2138 2138 oswresv = svd->swresv;
2139 2139 svd->swresv = ptob(anon_pages(amp->ahp,
2140 2140 svd->anon_index, npages));
2141 2141 anon_unresv_zone(oswresv - svd->swresv,
2142 2142 seg->s_as->a_proc->p_zone);
2143 2143 if (SEG_IS_PARTIAL_RESV(seg))
2144 2144 seg->s_as->a_resvsize -= oswresv -
2145 2145 svd->swresv;
2146 2146 } else {
2147 2147 size_t unlen;
2148 2148
2149 2149 if (svd->pageswap) {
2150 2150 oswresv = svd->swresv;
2151 2151 svd->swresv =
2152 2152 segvn_count_swap_by_vpages(seg);
2153 2153 ASSERT(oswresv >= svd->swresv);
2154 2154 unlen = oswresv - svd->swresv;
2155 2155 } else {
2156 2156 svd->swresv -= len;
2157 2157 ASSERT(svd->swresv == seg->s_size);
2158 2158 unlen = len;
2159 2159 }
2160 2160 anon_unresv_zone(unlen,
2161 2161 seg->s_as->a_proc->p_zone);
2162 2162 }
2163 2163 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2164 2164 "anon proc:%p %lu %u", seg, len, 0);
2165 2165 }
2166 2166
2167 2167 return (0);
2168 2168 }
2169 2169
2170 2170 /*
2171 2171 	 * The section to go is in the middle of the segment, so we
2172 2172 	 * have to make it into two segments. nseg is made for
2173 2173 * the high end while seg is cut down at the low end.
2174 2174 */
2175 2175 nbase = addr + len; /* new seg base */
2176 2176 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2177 2177 seg->s_size = addr - seg->s_base; /* shrink old seg */
2178 2178 nseg = seg_alloc(seg->s_as, nbase, nsize);
2179 2179 if (nseg == NULL) {
2180 2180 panic("segvn_unmap seg_alloc");
2181 2181 /*NOTREACHED*/
2182 2182 }
2183 2183 nseg->s_ops = seg->s_ops;
2184 2184 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2185 2185 nseg->s_data = (void *)nsvd;
2186 2186 nseg->s_szc = seg->s_szc;
2187 2187 *nsvd = *svd;
2188 2188 nsvd->seg = nseg;
2189 2189 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2190 2190 nsvd->swresv = 0;
2191 2191 nsvd->softlockcnt = 0;
2192 2192 nsvd->softlockcnt_sbase = 0;
2193 2193 nsvd->softlockcnt_send = 0;
2194 2194 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2195 2195
2196 2196 if (svd->vp != NULL) {
2197 2197 VN_HOLD(nsvd->vp);
2198 2198 if (nsvd->type == MAP_SHARED)
2199 2199 lgrp_shm_policy_init(NULL, nsvd->vp);
2200 2200 }
2201 2201 crhold(svd->cred);
2202 2202
2203 2203 if (svd->vpage == NULL) {
2204 2204 nsvd->vpage = NULL;
2205 2205 } else {
2206 2206 /* need to split vpage into two arrays */
2207 2207 size_t nbytes;
2208 2208 struct vpage *ovpage;
2209 2209
2210 2210 ovpage = svd->vpage; /* keep pointer to vpage */
2211 2211
2212 2212 npages = seg_pages(seg); /* seg has shrunk */
2213 2213 nbytes = vpgtob(npages);
2214 2214 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2215 2215
2216 2216 bcopy(ovpage, svd->vpage, nbytes);
2217 2217
2218 2218 npages = seg_pages(nseg);
2219 2219 nbytes = vpgtob(npages);
2220 2220 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2221 2221
2222 2222 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2223 2223
2224 2224 /* free up old vpage */
2225 2225 kmem_free(ovpage, vpgtob(opages));
2226 2226 }
2227 2227
2228 2228 if (amp == NULL) {
2229 2229 nsvd->amp = NULL;
2230 2230 nsvd->anon_index = 0;
2231 2231 } else {
2232 2232 /*
2233 2233 * Need to create a new anon map for the new segment.
2234 2234 * We'll also allocate a new smaller array for the old
2235 2235 * smaller segment to save space.
2236 2236 */
2237 2237 opages = btop((uintptr_t)(addr - seg->s_base));
2238 2238 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2239 2239 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2240 2240 /*
2241 2241 * Free up now unused parts of anon_map array.
2242 2242 */
2243 2243 ulong_t an_idx = svd->anon_index + opages;
2244 2244
2245 2245 /*
2246 2246 * Shared anon map is no longer in use. Before
2247 2247 * freeing its pages purge all entries from
2248 2248 * pcache that belong to this amp.
2249 2249 */
2250 2250 if (svd->type == MAP_SHARED) {
2251 2251 ASSERT(amp->refcnt == 1);
2252 2252 ASSERT(svd->softlockcnt == 0);
2253 2253 anonmap_purge(amp);
2254 2254 }
2255 2255
2256 2256 if (amp->a_szc == seg->s_szc) {
2257 2257 if (seg->s_szc != 0) {
2258 2258 anon_free_pages(amp->ahp, an_idx, len,
2259 2259 seg->s_szc);
2260 2260 } else {
2261 2261 anon_free(amp->ahp, an_idx,
2262 2262 len);
2263 2263 }
2264 2264 } else {
2265 2265 ASSERT(svd->type == MAP_SHARED);
2266 2266 ASSERT(amp->a_szc > seg->s_szc);
2267 2267 anon_shmap_free_pages(amp, an_idx, len);
2268 2268 }
2269 2269
2270 2270 /*
2271 2271 * Unreserve swap space for the
2272 2272 * unmapped chunk of this segment in
2273 2273 * case it's MAP_SHARED
2274 2274 */
2275 2275 if (svd->type == MAP_SHARED) {
2276 2276 anon_unresv_zone(len,
2277 2277 seg->s_as->a_proc->p_zone);
2278 2278 amp->swresv -= len;
2279 2279 }
2280 2280 }
2281 2281 nsvd->anon_index = svd->anon_index +
2282 2282 btop((uintptr_t)(nseg->s_base - seg->s_base));
2283 2283 if (svd->type == MAP_SHARED) {
2284 2284 amp->refcnt++;
2285 2285 nsvd->amp = amp;
2286 2286 } else {
2287 2287 struct anon_map *namp;
2288 2288 struct anon_hdr *nahp;
2289 2289
2290 2290 ASSERT(svd->type == MAP_PRIVATE);
2291 2291 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2292 2292 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2293 2293 namp->a_szc = seg->s_szc;
2294 2294 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2295 2295 0, btop(seg->s_size), ANON_SLEEP);
2296 2296 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2297 2297 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2298 2298 anon_release(amp->ahp, btop(amp->size));
2299 2299 svd->anon_index = 0;
2300 2300 nsvd->anon_index = 0;
2301 2301 amp->ahp = nahp;
2302 2302 amp->size = seg->s_size;
2303 2303 nsvd->amp = namp;
2304 2304 }
2305 2305 		ANON_LOCK_EXIT(&amp->a_rwlock);
2306 2306 }
2307 2307 if (svd->swresv) {
2308 2308 if (svd->flags & MAP_NORESERVE) {
2309 2309 ASSERT(amp);
2310 2310 oswresv = svd->swresv;
2311 2311 svd->swresv = ptob(anon_pages(amp->ahp,
2312 2312 svd->anon_index, btop(seg->s_size)));
2313 2313 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2314 2314 nsvd->anon_index, btop(nseg->s_size)));
2315 2315 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2316 2316 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2317 2317 seg->s_as->a_proc->p_zone);
2318 2318 if (SEG_IS_PARTIAL_RESV(seg))
2319 2319 seg->s_as->a_resvsize -= oswresv -
2320 2320 (svd->swresv + nsvd->swresv);
2321 2321 } else {
2322 2322 size_t unlen;
2323 2323
2324 2324 if (svd->pageswap) {
2325 2325 oswresv = svd->swresv;
2326 2326 svd->swresv = segvn_count_swap_by_vpages(seg);
2327 2327 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2328 2328 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2329 2329 unlen = oswresv - (svd->swresv + nsvd->swresv);
2330 2330 } else {
2331 2331 if (seg->s_size + nseg->s_size + len !=
2332 2332 svd->swresv) {
2333 2333 panic("segvn_unmap: cannot split "
2334 2334 "swap reservation");
2335 2335 /*NOTREACHED*/
2336 2336 }
2337 2337 svd->swresv = seg->s_size;
2338 2338 nsvd->swresv = nseg->s_size;
2339 2339 unlen = len;
2340 2340 }
2341 2341 anon_unresv_zone(unlen,
2342 2342 seg->s_as->a_proc->p_zone);
2343 2343 }
2344 2344 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2345 2345 seg, len, 0);
2346 2346 }
2347 2347
2348 2348 return (0); /* I'm glad that's all over with! */
2349 2349 }
2350 2350
2351 2351 static void
2352 2352 segvn_free(struct seg *seg)
2353 2353 {
2354 2354 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2355 2355 pgcnt_t npages = seg_pages(seg);
2356 2356 struct anon_map *amp;
2357 2357 size_t len;
2358 2358
2359 2359 /*
2360 2360 * We don't need any segment level locks for "segvn" data
2361 2361 * since the address space is "write" locked.
2362 2362 */
2363 2363 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2364 2364 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2365 2365
2366 2366 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2367 2367
2368 2368 /*
2369 2369 * Be sure to unlock pages. XXX Why do things get free'ed instead
2370 2370 * of unmapped? XXX
2371 2371 */
2372 2372 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2373 2373 0, MC_UNLOCK, NULL, 0);
2374 2374
2375 2375 /*
2376 2376 * Deallocate the vpage and anon pointers if necessary and possible.
2377 2377 */
2378 2378 if (svd->vpage != NULL) {
2379 2379 kmem_free(svd->vpage, vpgtob(npages));
2380 2380 svd->vpage = NULL;
2381 2381 }
2382 2382 if ((amp = svd->amp) != NULL) {
2383 2383 /*
2384 2384 * If there are no more references to this anon_map
2385 2385 * structure, then deallocate the structure after freeing
2386 2386 * up all the anon slot pointers that we can.
2387 2387 */
2388 2388 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2389 2389 ASSERT(amp->a_szc >= seg->s_szc);
2390 2390 if (--amp->refcnt == 0) {
2391 2391 if (svd->type == MAP_PRIVATE) {
2392 2392 /*
2393 2393 * Private - we only need to anon_free
2394 2394 * the part that this segment refers to.
2395 2395 */
2396 2396 if (seg->s_szc != 0) {
2397 2397 anon_free_pages(amp->ahp,
2398 2398 svd->anon_index, seg->s_size,
2399 2399 seg->s_szc);
2400 2400 } else {
2401 2401 anon_free(amp->ahp, svd->anon_index,
2402 2402 seg->s_size);
2403 2403 }
2404 2404 } else {
2405 2405
2406 2406 /*
2407 2407 * Shared anon map is no longer in use. Before
2408 2408 * freeing its pages purge all entries from
2409 2409 * pcache that belong to this amp.
2410 2410 */
2411 2411 ASSERT(svd->softlockcnt == 0);
2412 2412 anonmap_purge(amp);
2413 2413
2414 2414 /*
2415 2415 * Shared - anon_free the entire
2416 2416 * anon_map's worth of stuff and
2417 2417 * release any swap reservation.
2418 2418 */
2419 2419 if (amp->a_szc != 0) {
2420 2420 anon_shmap_free_pages(amp, 0,
2421 2421 amp->size);
2422 2422 } else {
2423 2423 anon_free(amp->ahp, 0, amp->size);
2424 2424 }
2425 2425 if ((len = amp->swresv) != 0) {
2426 2426 anon_unresv_zone(len,
2427 2427 seg->s_as->a_proc->p_zone);
2428 2428 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2429 2429 "anon proc:%p %lu %u", seg, len, 0);
2430 2430 }
2431 2431 }
2432 2432 svd->amp = NULL;
2433 2433 			ANON_LOCK_EXIT(&amp->a_rwlock);
2434 2434 anonmap_free(amp);
2435 2435 } else if (svd->type == MAP_PRIVATE) {
2436 2436 /*
2437 2437 * We had a private mapping which still has
2438 2438 * a held anon_map so just free up all the
2439 2439 * anon slot pointers that we were using.
2440 2440 */
2441 2441 if (seg->s_szc != 0) {
2442 2442 anon_free_pages(amp->ahp, svd->anon_index,
2443 2443 seg->s_size, seg->s_szc);
2444 2444 } else {
2445 2445 anon_free(amp->ahp, svd->anon_index,
2446 2446 seg->s_size);
2447 2447 }
2448 2448 			ANON_LOCK_EXIT(&amp->a_rwlock);
2449 2449 } else {
2450 2450 			ANON_LOCK_EXIT(&amp->a_rwlock);
2451 2451 }
2452 2452 }
2453 2453
2454 2454 /*
2455 2455 * Release swap reservation.
2456 2456 */
2457 2457 if ((len = svd->swresv) != 0) {
2458 2458 anon_unresv_zone(svd->swresv,
2459 2459 seg->s_as->a_proc->p_zone);
2460 2460 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2461 2461 seg, len, 0);
2462 2462 if (SEG_IS_PARTIAL_RESV(seg))
2463 2463 seg->s_as->a_resvsize -= svd->swresv;
2464 2464 svd->swresv = 0;
2465 2465 }
2466 2466 /*
2467 2467 * Release claim on vnode, credentials, and finally free the
2468 2468 * private data.
2469 2469 */
2470 2470 if (svd->vp != NULL) {
2471 2471 if (svd->type == MAP_SHARED)
2472 2472 lgrp_shm_policy_fini(NULL, svd->vp);
2473 2473 VN_RELE(svd->vp);
2474 2474 svd->vp = NULL;
2475 2475 }
2476 2476 crfree(svd->cred);
2477 2477 svd->pageprot = 0;
2478 2478 svd->pageadvice = 0;
2479 2479 svd->pageswap = 0;
2480 2480 svd->cred = NULL;
2481 2481
2482 2482 /*
2483 2483 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2484 2484 * still working with this segment without holding as lock (in case
2485 2485 * it's called by pcache async thread).
2486 2486 */
2487 2487 ASSERT(svd->softlockcnt == 0);
2488 2488 mutex_enter(&svd->segfree_syncmtx);
2489 2489 mutex_exit(&svd->segfree_syncmtx);
2490 2490
2491 2491 seg->s_data = NULL;
2492 2492 kmem_cache_free(segvn_cache, svd);
2493 2493 }
2494 2494
2495 2495 /*
2496 2496 * Do a F_SOFTUNLOCK call over the range requested. The range must have
2497 2497 * already been F_SOFTLOCK'ed.
2498 2498 * Caller must always match addr and len of a softunlock with a previous
2499 2499 * softlock with exactly the same addr and len.
2500 2500 */
2501 2501 static void
2502 2502 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2503 2503 {
2504 2504 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2505 2505 page_t *pp;
2506 2506 caddr_t adr;
2507 2507 struct vnode *vp;
2508 2508 u_offset_t offset;
2509 2509 ulong_t anon_index;
2510 2510 struct anon_map *amp;
2511 2511 struct anon *ap = NULL;
2512 2512
2513 2513 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2514 2514 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2515 2515
2516 2516 if ((amp = svd->amp) != NULL)
2517 2517 anon_index = svd->anon_index + seg_page(seg, addr);
2518 2518
2519 2519 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2520 2520 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2521 2521 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2522 2522 } else {
2523 2523 hat_unlock(seg->s_as->a_hat, addr, len);
2524 2524 }
2525 2525 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2526 2526 if (amp != NULL) {
2527 2527 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2528 2528 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2529 2529 != NULL) {
2530 2530 swap_xlate(ap, &vp, &offset);
2531 2531 } else {
2532 2532 vp = svd->vp;
2533 2533 offset = svd->offset +
2534 2534 (uintptr_t)(adr - seg->s_base);
2535 2535 }
2536 2536 			ANON_LOCK_EXIT(&amp->a_rwlock);
2537 2537 } else {
2538 2538 vp = svd->vp;
2539 2539 offset = svd->offset +
2540 2540 (uintptr_t)(adr - seg->s_base);
2541 2541 }
2542 2542
2543 2543 /*
2544 2544 * Use page_find() instead of page_lookup() to
2545 2545 * find the page since we know that it is locked.
2546 2546 */
2547 2547 pp = page_find(vp, offset);
2548 2548 if (pp == NULL) {
2549 2549 panic(
2550 2550 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2551 2551 (void *)adr, (void *)ap, (void *)vp, offset);
2552 2552 /*NOTREACHED*/
2553 2553 }
2554 2554
2555 2555 if (rw == S_WRITE) {
2556 2556 hat_setrefmod(pp);
2557 2557 if (seg->s_as->a_vbits)
2558 2558 hat_setstat(seg->s_as, adr, PAGESIZE,
2559 2559 P_REF | P_MOD);
2560 2560 } else if (rw != S_OTHER) {
2561 2561 hat_setref(pp);
2562 2562 if (seg->s_as->a_vbits)
2563 2563 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2564 2564 }
2565 2565 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2566 2566 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2567 2567 page_unlock(pp);
2568 2568 }
2569 2569 ASSERT(svd->softlockcnt >= btop(len));
2570 2570 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2571 2571 /*
2572 2572 * All SOFTLOCKS are gone. Wakeup any waiting
2573 2573 * unmappers so they can try again to unmap.
2574 2574 * Check for waiters first without the mutex
2575 2575 * held so we don't always grab the mutex on
2576 2576 * softunlocks.
2577 2577 */
2578 2578 if (AS_ISUNMAPWAIT(seg->s_as)) {
2579 2579 mutex_enter(&seg->s_as->a_contents);
2580 2580 if (AS_ISUNMAPWAIT(seg->s_as)) {
2581 2581 AS_CLRUNMAPWAIT(seg->s_as);
2582 2582 cv_broadcast(&seg->s_as->a_cv);
2583 2583 }
2584 2584 mutex_exit(&seg->s_as->a_contents);
2585 2585 }
2586 2586 }
2587 2587 }
2588 2588
2589 2589 #define PAGE_HANDLED ((page_t *)-1)
2590 2590
2591 2591 /*
2592 2592 * Release all the pages in the NULL terminated ppp list
2593 2593 * which haven't already been converted to PAGE_HANDLED.
2594 2594 */
2595 2595 static void
2596 2596 segvn_pagelist_rele(page_t **ppp)
2597 2597 {
2598 2598 for (; *ppp != NULL; ppp++) {
2599 2599 if (*ppp != PAGE_HANDLED)
2600 2600 page_unlock(*ppp);
2601 2601 }
2602 2602 }
2603 2603
2604 2604 static int stealcow = 1;
2605 2605
2606 2606 /*
2607 2607 * Workaround for viking chip bug. See bug id 1220902.
2608 2608  * To fix this down in pagefault() would require importing so
2609 2609  * much as (address space) and segvn code as to be unmaintainable.
2610 2610 */
2611 2611 int enable_mbit_wa = 0;
2612 2612
2613 2613 /*
2614 2614 * Handles all the dirty work of getting the right
2615 2615 * anonymous pages and loading up the translations.
2616 2616 * This routine is called only from segvn_fault()
2617 2617 * when looping over the range of addresses requested.
2618 2618 *
2619 2619 * The basic algorithm here is:
2620 2620 * If this is an anon_zero case
2621 2621 * Call anon_zero to allocate page
2622 2622 * Load up translation
2623 2623 * Return
2624 2624 * endif
2625 2625 * If this is an anon page
2626 2626 * Use anon_getpage to get the page
2627 2627 * else
2628 2628 * Find page in pl[] list passed in
2629 2629 * endif
2630 2630 * If not a cow
2631 2631 * Load up the translation to the page
2632 2632 * return
2633 2633 * endif
2634 2634 * Call anon_private to handle cow
2635 2635 * Load up (writable) translation to new page
2636 2636 */
2637 2637 static faultcode_t
2638 2638 segvn_faultpage(
2639 2639 struct hat *hat, /* the hat to use for mapping */
2640 2640 struct seg *seg, /* seg_vn of interest */
2641 2641 caddr_t addr, /* address in as */
2642 2642 u_offset_t off, /* offset in vp */
2643 2643 struct vpage *vpage, /* pointer to vpage for vp, off */
2644 2644 page_t *pl[], /* object source page pointer */
2645 2645 uint_t vpprot, /* access allowed to object pages */
2646 2646 enum fault_type type, /* type of fault */
2647 2647 enum seg_rw rw, /* type of access at fault */
2648 2648 int brkcow) /* we may need to break cow */
2649 2649 {
2650 2650 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2651 2651 page_t *pp, **ppp;
2652 2652 uint_t pageflags = 0;
2653 2653 page_t *anon_pl[1 + 1];
2654 2654 page_t *opp = NULL; /* original page */
2655 2655 uint_t prot;
2656 2656 int err;
2657 2657 int cow;
2658 2658 int claim;
2659 2659 int steal = 0;
2660 2660 ulong_t anon_index;
2661 2661 struct anon *ap, *oldap;
2662 2662 struct anon_map *amp;
2663 2663 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2664 2664 int anon_lock = 0;
2665 2665 anon_sync_obj_t cookie;
2666 2666
2667 2667 if (svd->flags & MAP_TEXT) {
2668 2668 hat_flag |= HAT_LOAD_TEXT;
2669 2669 }
2670 2670
2671 2671 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2672 2672 ASSERT(seg->s_szc == 0);
2673 2673 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2674 2674
2675 2675 /*
2676 2676 * Initialize protection value for this page.
2677 2677 * If we have per page protection values check it now.
2678 2678 */
2679 2679 if (svd->pageprot) {
2680 2680 uint_t protchk;
2681 2681
2682 2682 switch (rw) {
2683 2683 case S_READ:
2684 2684 protchk = PROT_READ;
2685 2685 break;
2686 2686 case S_WRITE:
2687 2687 protchk = PROT_WRITE;
2688 2688 break;
2689 2689 case S_EXEC:
2690 2690 protchk = PROT_EXEC;
2691 2691 break;
2692 2692 case S_OTHER:
2693 2693 default:
2694 2694 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2695 2695 break;
2696 2696 }
2697 2697
2698 2698 prot = VPP_PROT(vpage);
2699 2699 if ((prot & protchk) == 0)
2700 2700 return (FC_PROT); /* illegal access type */
2701 2701 } else {
2702 2702 prot = svd->prot;
2703 2703 }
2704 2704
2705 2705 if (type == F_SOFTLOCK) {
2706 2706 atomic_add_long((ulong_t *)&svd->softlockcnt, 1);
2707 2707 }
2708 2708
2709 2709 /*
2710 2710 * Always acquire the anon array lock to prevent 2 threads from
2711 2711 * allocating separate anon slots for the same "addr".
2712 2712 */
2713 2713
2714 2714 if ((amp = svd->amp) != NULL) {
2715 2715 		ASSERT(RW_READ_HELD(&amp->a_rwlock));
2716 2716 anon_index = svd->anon_index + seg_page(seg, addr);
2717 2717 anon_array_enter(amp, anon_index, &cookie);
2718 2718 anon_lock = 1;
2719 2719 }
2720 2720
2721 2721 if (svd->vp == NULL && amp != NULL) {
2722 2722 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2723 2723 /*
2724 2724 * Allocate a (normally) writable anonymous page of
2725 2725 * zeroes. If no advance reservations, reserve now.
2726 2726 */
2727 2727 if (svd->flags & MAP_NORESERVE) {
2728 2728 if (anon_resv_zone(ptob(1),
2729 2729 seg->s_as->a_proc->p_zone)) {
2730 2730 atomic_add_long(&svd->swresv, ptob(1));
2731 2731 atomic_add_long(&seg->s_as->a_resvsize,
2732 2732 ptob(1));
2733 2733 } else {
2734 2734 err = ENOMEM;
2735 2735 goto out;
2736 2736 }
2737 2737 }
2738 2738 if ((pp = anon_zero(seg, addr, &ap,
2739 2739 svd->cred)) == NULL) {
2740 2740 err = ENOMEM;
2741 2741 goto out; /* out of swap space */
2742 2742 }
2743 2743 /*
2744 2744 * Re-acquire the anon_map lock and
2745 2745 * initialize the anon array entry.
2746 2746 */
2747 2747 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2748 2748 ANON_SLEEP);
2749 2749
2750 2750 ASSERT(pp->p_szc == 0);
2751 2751
2752 2752 /*
2753 2753 * Handle pages that have been marked for migration
2754 2754 */
2755 2755 if (lgrp_optimizations())
2756 2756 page_migrate(seg, addr, &pp, 1);
2757 2757
2758 2758 if (enable_mbit_wa) {
2759 2759 if (rw == S_WRITE)
2760 2760 hat_setmod(pp);
2761 2761 else if (!hat_ismod(pp))
2762 2762 prot &= ~PROT_WRITE;
2763 2763 }
2764 2764 /*
2765 2765 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2766 2766 * with MC_LOCKAS, MCL_FUTURE) and this is a
2767 2767 * MAP_NORESERVE segment, we may need to
2768 2768 * permanently lock the page as it is being faulted
2769 2769 * for the first time. The following text applies
2770 2770 * only to MAP_NORESERVE segments:
2771 2771 *
2772 2772 * As per memcntl(2), if this segment was created
2773 2773 * after MCL_FUTURE was applied (a "future"
2774 2774 * segment), its pages must be locked. If this
2775 2775 * segment existed at MCL_FUTURE application (a
2776 2776 * "past" segment), the interface is unclear.
2777 2777 *
2778 2778 * We decide to lock only if vpage is present:
2779 2779 *
2780 2780 * - "future" segments will have a vpage array (see
2781 2781 * as_map), and so will be locked as required
2782 2782 *
2783 2783 * - "past" segments may not have a vpage array,
2784 2784 * depending on whether events (such as
2785 2785 * mprotect) have occurred. Locking if vpage
2786 2786 * exists will preserve legacy behavior. Not
2787 2787 * locking if vpage is absent, will not break
2788 2788 * the interface or legacy behavior. Note that
2789 2789 * allocating vpage here if it's absent requires
2790 2790 * upgrading the segvn reader lock, the cost of
2791 2791 * which does not seem worthwhile.
2792 2792 *
2793 2793 * Usually testing and setting VPP_ISPPLOCK and
2794 2794 * VPP_SETPPLOCK requires holding the segvn lock as
2795 2795 * writer, but in this case all readers are
2796 2796 * serializing on the anon array lock.
2797 2797 */
2798 2798 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2799 2799 (svd->flags & MAP_NORESERVE) &&
2800 2800 !VPP_ISPPLOCK(vpage)) {
2801 2801 proc_t *p = seg->s_as->a_proc;
2802 2802 ASSERT(svd->type == MAP_PRIVATE);
2803 2803 mutex_enter(&p->p_lock);
2804 2804 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2805 2805 1) == 0) {
2806 2806 claim = VPP_PROT(vpage) & PROT_WRITE;
2807 2807 if (page_pp_lock(pp, claim, 0)) {
2808 2808 VPP_SETPPLOCK(vpage);
2809 2809 } else {
2810 2810 rctl_decr_locked_mem(p, NULL,
2811 2811 PAGESIZE, 1);
2812 2812 }
2813 2813 }
2814 2814 mutex_exit(&p->p_lock);
2815 2815 }
2816 2816
2817 2817 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2818 2818 hat_memload(hat, addr, pp, prot, hat_flag);
2819 2819
2820 2820 if (!(hat_flag & HAT_LOAD_LOCK))
2821 2821 page_unlock(pp);
2822 2822
2823 2823 anon_array_exit(&cookie);
2824 2824 return (0);
2825 2825 }
2826 2826 }
2827 2827
2828 2828 /*
2829 2829 * Obtain the page structure via anon_getpage() if it is
2830 2830 * a private copy of an object (the result of a previous
2831 2831 * copy-on-write).
2832 2832 */
2833 2833 if (amp != NULL) {
2834 2834 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2835 2835 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2836 2836 seg, addr, rw, svd->cred);
2837 2837 if (err)
2838 2838 goto out;
2839 2839
2840 2840 if (svd->type == MAP_SHARED) {
2841 2841 /*
2842 2842 * If this is a shared mapping to an
2843 2843 * anon_map, then ignore the write
2844 2844 * permissions returned by anon_getpage().
2845 2845 * They apply to the private mappings
2846 2846 * of this anon_map.
2847 2847 */
2848 2848 vpprot |= PROT_WRITE;
2849 2849 }
2850 2850 opp = anon_pl[0];
2851 2851 }
2852 2852 }
2853 2853
2854 2854 /*
2855 2855 * Search the pl[] list passed in if it is from the
2856 2856 * original object (i.e., not a private copy).
2857 2857 */
2858 2858 if (opp == NULL) {
2859 2859 /*
2860 2860 * Find original page. We must be bringing it in
2861 2861 * from the list in pl[].
2862 2862 */
2863 2863 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2864 2864 if (opp == PAGE_HANDLED)
2865 2865 continue;
2866 2866 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2867 2867 if (opp->p_offset == off)
2868 2868 break;
2869 2869 }
2870 2870 if (opp == NULL) {
2871 2871 panic("segvn_faultpage not found");
2872 2872 /*NOTREACHED*/
2873 2873 }
2874 2874 *ppp = PAGE_HANDLED;
2875 2875
2876 2876 }
2877 2877
2878 2878 ASSERT(PAGE_LOCKED(opp));
2879 2879
2880 2880 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2881 2881 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2882 2882
2883 2883 /*
2884 2884 * The fault is treated as a copy-on-write fault if a
2885 2885 * write occurs on a private segment and the object
2886 2886 * page (i.e., mapping) is write protected. We assume
2887 2887 * that fatal protection checks have already been made.
2888 2888 */
2889 2889
2890 2890 if (brkcow) {
2891 2891 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2892 2892 cow = !(vpprot & PROT_WRITE);
2893 2893 } else if (svd->tr_state == SEGVN_TR_ON) {
2894 2894 /*
2895 2895 * If we are doing text replication COW on first touch.
2896 2896 */
2897 2897 ASSERT(amp != NULL);
2898 2898 ASSERT(svd->vp != NULL);
2899 2899 ASSERT(rw != S_WRITE);
2900 2900 cow = (ap == NULL);
2901 2901 } else {
2902 2902 cow = 0;
2903 2903 }
2904 2904
2905 2905 /*
2906 2906 * If not a copy-on-write case load the translation
2907 2907 * and return.
2908 2908 */
2909 2909 if (cow == 0) {
2910 2910
2911 2911 /*
2912 2912 * Handle pages that have been marked for migration
2913 2913 */
2914 2914 if (lgrp_optimizations())
2915 2915 page_migrate(seg, addr, &opp, 1);
2916 2916
2917 2917 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2918 2918 if (rw == S_WRITE)
2919 2919 hat_setmod(opp);
2920 2920 else if (rw != S_OTHER && !hat_ismod(opp))
2921 2921 prot &= ~PROT_WRITE;
2922 2922 }
2923 2923
2924 2924 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2925 2925 (!svd->pageprot && svd->prot == (prot & vpprot)));
2926 2926 ASSERT(amp == NULL ||
2927 2927 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2928 2928 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2929 2929 svd->rcookie);
2930 2930
2931 2931 if (!(hat_flag & HAT_LOAD_LOCK))
2932 2932 page_unlock(opp);
2933 2933
2934 2934 if (anon_lock) {
2935 2935 anon_array_exit(&cookie);
2936 2936 }
2937 2937 return (0);
2938 2938 }
2939 2939
2940 2940 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2941 2941
2942 2942 hat_setref(opp);
2943 2943
2944 2944 ASSERT(amp != NULL && anon_lock);
2945 2945
2946 2946 /*
2947 2947 * Steal the page only if it isn't a private page
2948 2948 * since stealing a private page is not worth the effort.
2949 2949 */
2950 2950 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
2951 2951 steal = 1;
2952 2952
2953 2953 /*
2954 2954 * Steal the original page if the following conditions are true:
2955 2955 *
2956 2956 * We are low on memory, the page is not private, page is not large,
2957 2957 * not shared, not modified, not `locked' or if we have it `locked'
2958 2958 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
2959 2959 * that the page is not shared) and if it doesn't have any
2960 2960 * translations. page_struct_lock isn't needed to look at p_cowcnt
2961 2961 * and p_lckcnt because we first get exclusive lock on page.
2962 2962 */
2963 2963 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
2964 2964
2965 2965 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
2966 2966 page_tryupgrade(opp) && !hat_ismod(opp) &&
2967 2967 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
2968 2968 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
2969 2969 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
2970 2970 /*
2971 2971 * Check if this page has other translations
2972 2972 * after unloading our translation.
2973 2973 */
2974 2974 if (hat_page_is_mapped(opp)) {
2975 2975 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2976 2976 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
2977 2977 HAT_UNLOAD);
2978 2978 }
2979 2979
2980 2980 /*
2981 2981 * hat_unload() might sync back someone else's recent
2982 2982 * modification, so check again.
2983 2983 */
2984 2984 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
2985 2985 pageflags |= STEAL_PAGE;
2986 2986 }
2987 2987
2988 2988 /*
2989 2989 * If we have a vpage pointer, see if it indicates that we have
2990 2990 * ``locked'' the page we map -- if so, tell anon_private to
2991 2991 * transfer the locking resource to the new page.
2992 2992 *
2993 2993 * See Statement at the beginning of segvn_lockop regarding
2994 2994 * the way lockcnts/cowcnts are handled during COW.
2995 2995 *
2996 2996 */
2997 2997 if (vpage != NULL && VPP_ISPPLOCK(vpage))
2998 2998 pageflags |= LOCK_PAGE;
2999 2999
3000 3000 /*
3001 3001 * Allocate a private page and perform the copy.
3002 3002 * For MAP_NORESERVE reserve swap space now, unless this
3003 3003 * is a cow fault on an existing anon page in which case
3004 3004 * MAP_NORESERVE will have made advance reservations.
3005 3005 */
3006 3006 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3007 3007 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3008 3008 atomic_add_long(&svd->swresv, ptob(1));
3009 3009 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3010 3010 } else {
3011 3011 page_unlock(opp);
3012 3012 err = ENOMEM;
3013 3013 goto out;
3014 3014 }
3015 3015 }
3016 3016 oldap = ap;
3017 3017 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3018 3018 if (pp == NULL) {
3019 3019 err = ENOMEM; /* out of swap space */
3020 3020 goto out;
3021 3021 }
3022 3022
3023 3023 /*
3024 3024 * If we copied away from an anonymous page, then
3025 3025 * we are one step closer to freeing up an anon slot.
3026 3026 *
3027 3027 * NOTE: The original anon slot must be released while
3028 3028 * holding the "anon_map" lock. This is necessary to prevent
3029 3029 * other threads from obtaining a pointer to the anon slot
3030 3030 * which may be freed if its "refcnt" is 1.
3031 3031 */
3032 3032 if (oldap != NULL)
3033 3033 anon_decref(oldap);
3034 3034
3035 3035 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3036 3036
3037 3037 /*
3038 3038 * Handle pages that have been marked for migration
3039 3039 */
3040 3040 if (lgrp_optimizations())
3041 3041 page_migrate(seg, addr, &pp, 1);
3042 3042
3043 3043 ASSERT(pp->p_szc == 0);
3044 3044
3045 3045 ASSERT(!IS_VMODSORT(pp->p_vnode));
3046 3046 if (enable_mbit_wa) {
3047 3047 if (rw == S_WRITE)
3048 3048 hat_setmod(pp);
3049 3049 else if (!hat_ismod(pp))
3050 3050 prot &= ~PROT_WRITE;
3051 3051 }
3052 3052
3053 3053 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3054 3054 hat_memload(hat, addr, pp, prot, hat_flag);
3055 3055
3056 3056 if (!(hat_flag & HAT_LOAD_LOCK))
3057 3057 page_unlock(pp);
3058 3058
3059 3059 ASSERT(anon_lock);
3060 3060 anon_array_exit(&cookie);
3061 3061 return (0);
3062 3062 out:
3063 3063 if (anon_lock)
3064 3064 anon_array_exit(&cookie);
3065 3065
3066 3066 if (type == F_SOFTLOCK) {
3067 3067 atomic_add_long((ulong_t *)&svd->softlockcnt, -1);
3068 3068 }
3069 3069 return (FC_MAKE_ERR(err));
3070 3070 }
3071 3071
3072 3072 /*
3073 3073 * relocate a bunch of smaller targ pages into one large repl page. all targ
3074 3074 * pages must be complete pages smaller than replacement pages.
3075 3075 * it's assumed that no page's szc can change since they are all PAGESIZE or
3076 3076 * complete large pages locked SHARED.
3077 3077 */
3078 3078 static void
3079 3079 segvn_relocate_pages(page_t **targ, page_t *replacement)
3080 3080 {
3081 3081 page_t *pp;
3082 3082 pgcnt_t repl_npgs, curnpgs;
3083 3083 pgcnt_t i;
3084 3084 uint_t repl_szc = replacement->p_szc;
3085 3085 page_t *first_repl = replacement;
3086 3086 page_t *repl;
3087 3087 spgcnt_t npgs;
3088 3088
3089 3089 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3090 3090
3091 3091 ASSERT(repl_szc != 0);
3092 3092 npgs = repl_npgs = page_get_pagecnt(repl_szc);
3093 3093
3094 3094 i = 0;
3095 3095 while (repl_npgs) {
3096 3096 spgcnt_t nreloc;
3097 3097 int err;
3098 3098 ASSERT(replacement != NULL);
3099 3099 pp = targ[i];
3100 3100 ASSERT(pp->p_szc < repl_szc);
3101 3101 ASSERT(PAGE_EXCL(pp));
3102 3102 ASSERT(!PP_ISFREE(pp));
3103 3103 curnpgs = page_get_pagecnt(pp->p_szc);
3104 3104 if (curnpgs == 1) {
3105 3105 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3106 3106 repl = replacement;
3107 3107 page_sub(&replacement, repl);
3108 3108 ASSERT(PAGE_EXCL(repl));
3109 3109 ASSERT(!PP_ISFREE(repl));
3110 3110 ASSERT(repl->p_szc == repl_szc);
3111 3111 } else {
3112 3112 page_t *repl_savepp;
3113 3113 int j;
3114 3114 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3115 3115 repl_savepp = replacement;
3116 3116 for (j = 0; j < curnpgs; j++) {
3117 3117 repl = replacement;
3118 3118 page_sub(&replacement, repl);
3119 3119 ASSERT(PAGE_EXCL(repl));
3120 3120 ASSERT(!PP_ISFREE(repl));
3121 3121 ASSERT(repl->p_szc == repl_szc);
3122 3122 ASSERT(page_pptonum(targ[i + j]) ==
3123 3123 page_pptonum(targ[i]) + j);
3124 3124 }
3125 3125 repl = repl_savepp;
3126 3126 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3127 3127 }
3128 3128 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3129 3129 if (err || nreloc != curnpgs) {
3130 3130 panic("segvn_relocate_pages: "
3131 3131 "page_relocate failed err=%d curnpgs=%ld "
3132 3132 "nreloc=%ld", err, curnpgs, nreloc);
3133 3133 }
3134 3134 ASSERT(curnpgs <= repl_npgs);
3135 3135 repl_npgs -= curnpgs;
3136 3136 i += curnpgs;
3137 3137 }
3138 3138 ASSERT(replacement == NULL);
3139 3139
3140 3140 repl = first_repl;
3141 3141 repl_npgs = npgs;
3142 3142 for (i = 0; i < repl_npgs; i++) {
3143 3143 ASSERT(PAGE_EXCL(repl));
3144 3144 ASSERT(!PP_ISFREE(repl));
3145 3145 targ[i] = repl;
3146 3146 page_downgrade(targ[i]);
3147 3147 repl++;
3148 3148 }
3149 3149 }
3150 3150
3151 3151 /*
3152 3152 * Check if all pages in ppa array are complete smaller than szc pages and
3153 3153 * their roots will still be aligned relative to their current size if the
3154 3154 * entire ppa array is relocated into one szc page. If these conditions are
3155 3155 * not met return 0.
3156 3156 *
3157 3157 * If all pages are properly aligned attempt to upgrade their locks
3158 3158 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3159 3159 * upgrdfail was set to 0 by caller.
3160 3160 *
3161 3161 * Return 1 if all pages are aligned and locked exclusively.
3162 3162 *
3163 3163 * If all pages in ppa array happen to be physically contiguous to make one
3164 3164 * szc page and all exclusive locks are successfully obtained promote the page
3165 3165 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3166 3166 */
3167 3167 static int
3168 3168 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3169 3169 {
3170 3170 page_t *pp;
3171 3171 pfn_t pfn;
3172 3172 pgcnt_t totnpgs = page_get_pagecnt(szc);
3173 3173 pfn_t first_pfn;
3174 3174 int contig = 1;
3175 3175 pgcnt_t i;
3176 3176 pgcnt_t j;
3177 3177 uint_t curszc;
3178 3178 pgcnt_t curnpgs;
3179 3179 int root = 0;
3180 3180
3181 3181 ASSERT(szc > 0);
3182 3182
3183 3183 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3184 3184
3185 3185 for (i = 0; i < totnpgs; i++) {
3186 3186 pp = ppa[i];
3187 3187 ASSERT(PAGE_SHARED(pp));
3188 3188 ASSERT(!PP_ISFREE(pp));
3189 3189 pfn = page_pptonum(pp);
3190 3190 if (i == 0) {
3191 3191 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3192 3192 contig = 0;
3193 3193 } else {
3194 3194 first_pfn = pfn;
3195 3195 }
3196 3196 } else if (contig && pfn != first_pfn + i) {
3197 3197 contig = 0;
3198 3198 }
3199 3199 if (pp->p_szc == 0) {
3200 3200 if (root) {
3201 3201 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3202 3202 return (0);
3203 3203 }
3204 3204 } else if (!root) {
3205 3205 if ((curszc = pp->p_szc) >= szc) {
3206 3206 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3207 3207 return (0);
3208 3208 }
3209 3209 if (curszc == 0) {
3210 3210 /*
3211 3211 * p_szc changed means we don't have all pages
3212 3212 * locked. return failure.
3213 3213 */
3214 3214 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3215 3215 return (0);
3216 3216 }
3217 3217 curnpgs = page_get_pagecnt(curszc);
3218 3218 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3219 3219 !IS_P2ALIGNED(i, curnpgs)) {
3220 3220 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3221 3221 return (0);
3222 3222 }
3223 3223 root = 1;
3224 3224 } else {
3225 3225 ASSERT(i > 0);
3226 3226 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3227 3227 if (pp->p_szc != curszc) {
3228 3228 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3229 3229 return (0);
3230 3230 }
3231 3231 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3232 3232 panic("segvn_full_szcpages: "
3233 3233 "large page not physically contiguous");
3234 3234 }
3235 3235 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3236 3236 root = 0;
3237 3237 }
3238 3238 }
3239 3239 }
3240 3240
3241 3241 for (i = 0; i < totnpgs; i++) {
3242 3242 ASSERT(ppa[i]->p_szc < szc);
3243 3243 if (!page_tryupgrade(ppa[i])) {
3244 3244 for (j = 0; j < i; j++) {
3245 3245 page_downgrade(ppa[j]);
3246 3246 }
3247 3247 *pszc = ppa[i]->p_szc;
3248 3248 *upgrdfail = 1;
3249 3249 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3250 3250 return (0);
3251 3251 }
3252 3252 }
3253 3253
3254 3254 /*
3255 3255 	 * When a page is put on a free cachelist its szc is set to 0. If the
3256 3256 	 * file system reclaimed pages from the cachelist, targ pages will be
3257 3257 	 * physically contiguous with 0 p_szc. In this case just upgrade the
3258 3258 	 * szc of the targ pages without any relocations.
3259 3259 * To avoid any hat issues with previous small mappings
3260 3260 * hat_pageunload() the target pages first.
3261 3261 */
3262 3262 if (contig) {
3263 3263 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3264 3264 for (i = 0; i < totnpgs; i++) {
3265 3265 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3266 3266 }
3267 3267 for (i = 0; i < totnpgs; i++) {
3268 3268 ppa[i]->p_szc = szc;
3269 3269 }
3270 3270 for (i = 0; i < totnpgs; i++) {
3271 3271 ASSERT(PAGE_EXCL(ppa[i]));
3272 3272 page_downgrade(ppa[i]);
3273 3273 }
3274 3274 if (pszc != NULL) {
3275 3275 *pszc = szc;
3276 3276 }
3277 3277 }
3278 3278 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3279 3279 return (1);
3280 3280 }
3281 3281
3282 3282 /*
3283 3283 * Create physically contiguous pages for the [vp, off] - [vp, off +
3284 3284 * page_size(szc)) range and, for a private segment, return them in the ppa array.
3285 3285 * Pages are created either via IO or relocations.
3286 3286 *
3287 3287 * Return 1 on success and 0 on failure.
3288 3288 *
3289 3289 * If physically contiguous pages already exist for this range, return 1 without
3290 3290 * filling the ppa array. The caller initializes ppa[0] to NULL to detect that the
3291 3291 * ppa array wasn't filled; in that case the caller fills the ppa array via VOP_GETPAGE().
3292 3292 */
3293 3293
3294 3294 static int
3295 3295 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3296 3296 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3297 3297 int *downsize)
3298 3298
3299 3299 {
3300 3300 page_t *pplist = *ppplist;
3301 3301 size_t pgsz = page_get_pagesize(szc);
3302 3302 pgcnt_t pages = btop(pgsz);
3303 3303 ulong_t start_off = off;
3304 3304 u_offset_t eoff = off + pgsz;
3305 3305 spgcnt_t nreloc;
3306 3306 u_offset_t io_off = off;
3307 3307 size_t io_len;
3308 3308 page_t *io_pplist = NULL;
3309 3309 page_t *done_pplist = NULL;
3310 3310 pgcnt_t pgidx = 0;
3311 3311 page_t *pp;
3312 3312 page_t *newpp;
3313 3313 page_t *targpp;
3314 3314 int io_err = 0;
3315 3315 int i;
3316 3316 pfn_t pfn;
3317 3317 ulong_t ppages;
3318 3318 page_t *targ_pplist = NULL;
3319 3319 page_t *repl_pplist = NULL;
3320 3320 page_t *tmp_pplist;
3321 3321 int nios = 0;
3322 3322 uint_t pszc;
3323 3323 struct vattr va;
3324 3324
3325 3325 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3326 3326
3327 3327 ASSERT(szc != 0);
3328 3328 ASSERT(pplist->p_szc == szc);
3329 3329
3330 3330 /*
3331 3331 * downsize will be set to 1 only if we fail to lock pages. This will
3332 3332 * allow subsequent faults to try to relocate the page again. If we
3333 3333 * fail due to misalignment, don't downsize and let the caller map the
3334 3334 * whole region with small mappings to avoid more faults into the area
3335 3335 * where we can't get large pages anyway.
3336 3336 */
3337 3337 *downsize = 0;
3338 3338
3339 3339 while (off < eoff) {
3340 3340 newpp = pplist;
3341 3341 ASSERT(newpp != NULL);
3342 3342 ASSERT(PAGE_EXCL(newpp));
3343 3343 ASSERT(!PP_ISFREE(newpp));
3344 3344 /*
3345 3345 * we pass NULL for nrelocp to page_lookup_create()
3346 3346 * so that it doesn't relocate. We relocate here
3347 3347 * later only after we make sure we can lock all
3348 3348 * pages in the range we handle and they are all
3349 3349 * aligned.
3350 3350 */
3351 3351 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3352 3352 ASSERT(pp != NULL);
3353 3353 ASSERT(!PP_ISFREE(pp));
3354 3354 ASSERT(pp->p_vnode == vp);
3355 3355 ASSERT(pp->p_offset == off);
3356 3356 if (pp == newpp) {
3357 3357 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3358 3358 page_sub(&pplist, pp);
3359 3359 ASSERT(PAGE_EXCL(pp));
3360 3360 ASSERT(page_iolock_assert(pp));
3361 3361 page_list_concat(&io_pplist, &pp);
3362 3362 off += PAGESIZE;
3363 3363 continue;
3364 3364 }
3365 3365 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3366 3366 pfn = page_pptonum(pp);
3367 3367 pszc = pp->p_szc;
3368 3368 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3369 3369 IS_P2ALIGNED(pfn, pages)) {
3370 3370 ASSERT(repl_pplist == NULL);
3371 3371 ASSERT(done_pplist == NULL);
3372 3372 ASSERT(pplist == *ppplist);
3373 3373 page_unlock(pp);
3374 3374 page_free_replacement_page(pplist);
3375 3375 page_create_putback(pages);
3376 3376 *ppplist = NULL;
3377 3377 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3378 3378 return (1);
3379 3379 }
3380 3380 if (pszc >= szc) {
3381 3381 page_unlock(pp);
3382 3382 segvn_faultvnmpss_align_err1++;
3383 3383 goto out;
3384 3384 }
3385 3385 ppages = page_get_pagecnt(pszc);
3386 3386 if (!IS_P2ALIGNED(pfn, ppages)) {
3387 3387 ASSERT(pszc > 0);
3388 3388 /*
3389 3389 * sizing down to pszc won't help.
3390 3390 */
3391 3391 page_unlock(pp);
3392 3392 segvn_faultvnmpss_align_err2++;
3393 3393 goto out;
3394 3394 }
3395 3395 pfn = page_pptonum(newpp);
3396 3396 if (!IS_P2ALIGNED(pfn, ppages)) {
3397 3397 ASSERT(pszc > 0);
3398 3398 /*
3399 3399 * sizing down to pszc won't help.
3400 3400 */
3401 3401 page_unlock(pp);
3402 3402 segvn_faultvnmpss_align_err3++;
3403 3403 goto out;
3404 3404 }
3405 3405 if (!PAGE_EXCL(pp)) {
3406 3406 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3407 3407 page_unlock(pp);
3408 3408 *downsize = 1;
3409 3409 *ret_pszc = pp->p_szc;
3410 3410 goto out;
3411 3411 }
3412 3412 targpp = pp;
3413 3413 if (io_pplist != NULL) {
3414 3414 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3415 3415 io_len = off - io_off;
3416 3416 /*
3417 3417 * Some file systems like NFS don't check EOF
3418 3418 * conditions in VOP_PAGEIO(). Check it here
3419 3419 * now that pages are locked SE_EXCL. Any file
3420 3420 * truncation will wait until the pages are
3421 3421 * unlocked so no need to worry that file will
3422 3422 * be truncated after we check its size here.
3423 3423 * XXX fix NFS to remove this check.
3424 3424 */
3425 3425 va.va_mask = AT_SIZE;
3426 3426 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3427 3427 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3428 3428 page_unlock(targpp);
3429 3429 goto out;
3430 3430 }
3431 3431 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3432 3432 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3433 3433 *downsize = 1;
3434 3434 *ret_pszc = 0;
3435 3435 page_unlock(targpp);
3436 3436 goto out;
3437 3437 }
3438 3438 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3439 3439 B_READ, svd->cred, NULL);
3440 3440 if (io_err) {
3441 3441 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3442 3442 page_unlock(targpp);
3443 3443 if (io_err == EDEADLK) {
3444 3444 segvn_vmpss_pageio_deadlk_err++;
3445 3445 }
3446 3446 goto out;
3447 3447 }
3448 3448 nios++;
3449 3449 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3450 3450 while (io_pplist != NULL) {
3451 3451 pp = io_pplist;
3452 3452 page_sub(&io_pplist, pp);
3453 3453 ASSERT(page_iolock_assert(pp));
3454 3454 page_io_unlock(pp);
3455 3455 pgidx = (pp->p_offset - start_off) >>
3456 3456 PAGESHIFT;
3457 3457 ASSERT(pgidx < pages);
3458 3458 ppa[pgidx] = pp;
3459 3459 page_list_concat(&done_pplist, &pp);
3460 3460 }
3461 3461 }
3462 3462 pp = targpp;
3463 3463 ASSERT(PAGE_EXCL(pp));
3464 3464 ASSERT(pp->p_szc <= pszc);
3465 3465 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3466 3466 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3467 3467 page_unlock(pp);
3468 3468 *downsize = 1;
3469 3469 *ret_pszc = pp->p_szc;
3470 3470 goto out;
3471 3471 }
3472 3472 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3473 3473 /*
3474 3474 * The page szc could have changed before the entire group was
3475 3475 * locked. Reread the page szc.
3476 3476 */
3477 3477 pszc = pp->p_szc;
3478 3478 ppages = page_get_pagecnt(pszc);
3479 3479
3480 3480 /* link just the roots */
3481 3481 page_list_concat(&targ_pplist, &pp);
3482 3482 page_sub(&pplist, newpp);
3483 3483 page_list_concat(&repl_pplist, &newpp);
3484 3484 off += PAGESIZE;
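/*
 * Consume the remaining constituent replacement pages here; page_relocate()
 * below relocates the whole existing pszc-sized group through its root and
 * fills ppa with the relocated constituent pages.
 */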
3485 3485 while (--ppages != 0) {
3486 3486 newpp = pplist;
3487 3487 page_sub(&pplist, newpp);
3488 3488 off += PAGESIZE;
3489 3489 }
3490 3490 io_off = off;
3491 3491 }
3492 3492 if (io_pplist != NULL) {
3493 3493 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3494 3494 io_len = eoff - io_off;
3495 3495 va.va_mask = AT_SIZE;
3496 3496 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3497 3497 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3498 3498 goto out;
3499 3499 }
3500 3500 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3501 3501 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3502 3502 *downsize = 1;
3503 3503 *ret_pszc = 0;
3504 3504 goto out;
3505 3505 }
3506 3506 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3507 3507 B_READ, svd->cred, NULL);
3508 3508 if (io_err) {
3509 3509 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3510 3510 if (io_err == EDEADLK) {
3511 3511 segvn_vmpss_pageio_deadlk_err++;
3512 3512 }
3513 3513 goto out;
3514 3514 }
3515 3515 nios++;
3516 3516 while (io_pplist != NULL) {
3517 3517 pp = io_pplist;
3518 3518 page_sub(&io_pplist, pp);
3519 3519 ASSERT(page_iolock_assert(pp));
3520 3520 page_io_unlock(pp);
3521 3521 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3522 3522 ASSERT(pgidx < pages);
3523 3523 ppa[pgidx] = pp;
3524 3524 }
3525 3525 }
3526 3526 /*
3527 3527 * We're now bound to succeed or panic.
3528 3528 * Remove pages from done_pplist; it's not needed anymore.
3529 3529 */
3530 3530 while (done_pplist != NULL) {
3531 3531 pp = done_pplist;
3532 3532 page_sub(&done_pplist, pp);
3533 3533 }
3534 3534 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3535 3535 ASSERT(pplist == NULL);
3536 3536 *ppplist = NULL;
3537 3537 while (targ_pplist != NULL) {
3538 3538 int ret;
3539 3539 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3540 3540 ASSERT(repl_pplist);
3541 3541 pp = targ_pplist;
3542 3542 page_sub(&targ_pplist, pp);
3543 3543 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3544 3544 newpp = repl_pplist;
3545 3545 page_sub(&repl_pplist, newpp);
3546 3546 #ifdef DEBUG
3547 3547 pfn = page_pptonum(pp);
3548 3548 pszc = pp->p_szc;
3549 3549 ppages = page_get_pagecnt(pszc);
3550 3550 ASSERT(IS_P2ALIGNED(pfn, ppages));
3551 3551 pfn = page_pptonum(newpp);
3552 3552 ASSERT(IS_P2ALIGNED(pfn, ppages));
3553 3553 ASSERT(P2PHASE(pfn, pages) == pgidx);
3554 3554 #endif
3555 3555 nreloc = 0;
3556 3556 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3557 3557 if (ret != 0 || nreloc == 0) {
3558 3558 panic("segvn_fill_vp_pages: "
3559 3559 "page_relocate failed");
3560 3560 }
3561 3561 pp = newpp;
3562 3562 while (nreloc-- != 0) {
3563 3563 ASSERT(PAGE_EXCL(pp));
3564 3564 ASSERT(pp->p_vnode == vp);
3565 3565 ASSERT(pgidx ==
3566 3566 ((pp->p_offset - start_off) >> PAGESHIFT));
3567 3567 ppa[pgidx++] = pp;
3568 3568 pp++;
3569 3569 }
3570 3570 }
3571 3571
3572 3572 if (svd->type == MAP_PRIVATE) {
3573 3573 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3574 3574 for (i = 0; i < pages; i++) {
3575 3575 ASSERT(ppa[i] != NULL);
3576 3576 ASSERT(PAGE_EXCL(ppa[i]));
3577 3577 ASSERT(ppa[i]->p_vnode == vp);
3578 3578 ASSERT(ppa[i]->p_offset ==
3579 3579 start_off + (i << PAGESHIFT));
3580 3580 page_downgrade(ppa[i]);
3581 3581 }
3582 3582 ppa[pages] = NULL;
3583 3583 } else {
3584 3584 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3585 3585 /*
3586 3586 * The caller will still call VOP_GETPAGE() for shared segments
3587 3587 * to check FS write permissions. For private segments we map the
3588 3588 * file read only anyway, so no VOP_GETPAGE() is needed.
3589 3589 */
3590 3590 for (i = 0; i < pages; i++) {
3591 3591 ASSERT(ppa[i] != NULL);
3592 3592 ASSERT(PAGE_EXCL(ppa[i]));
3593 3593 ASSERT(ppa[i]->p_vnode == vp);
3594 3594 ASSERT(ppa[i]->p_offset ==
3595 3595 start_off + (i << PAGESHIFT));
3596 3596 page_unlock(ppa[i]);
3597 3597 }
3598 3598 ppa[0] = NULL;
3599 3599 }
3600 3600
3601 3601 return (1);
3602 3602 out:
3603 3603 /*
3604 3604 * Do the cleanup. Unlock the target pages we didn't relocate. They are
3605 3605 * linked on targ_pplist by root pages. Reassemble unused replacement
3606 3606 * and io pages back onto pplist.
3607 3607 */
3608 3608 if (io_pplist != NULL) {
3609 3609 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3610 3610 pp = io_pplist;
3611 3611 do {
3612 3612 ASSERT(pp->p_vnode == vp);
3613 3613 ASSERT(pp->p_offset == io_off);
3614 3614 ASSERT(page_iolock_assert(pp));
3615 3615 page_io_unlock(pp);
3616 3616 page_hashout(pp, NULL);
3617 3617 io_off += PAGESIZE;
3618 3618 } while ((pp = pp->p_next) != io_pplist);
3619 3619 page_list_concat(&io_pplist, &pplist);
3620 3620 pplist = io_pplist;
3621 3621 }
3622 3622 tmp_pplist = NULL;
3623 3623 while (targ_pplist != NULL) {
3624 3624 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3625 3625 pp = targ_pplist;
3626 3626 ASSERT(PAGE_EXCL(pp));
3627 3627 page_sub(&targ_pplist, pp);
3628 3628
3629 3629 pszc = pp->p_szc;
3630 3630 ppages = page_get_pagecnt(pszc);
3631 3631 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3632 3632
3633 3633 if (pszc != 0) {
3634 3634 group_page_unlock(pp);
3635 3635 }
3636 3636 page_unlock(pp);
3637 3637
3638 3638 pp = repl_pplist;
3639 3639 ASSERT(pp != NULL);
3640 3640 ASSERT(PAGE_EXCL(pp));
3641 3641 ASSERT(pp->p_szc == szc);
3642 3642 page_sub(&repl_pplist, pp);
3643 3643
3644 3644 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3645 3645
3646 3646 /* relink replacement page */
3647 3647 page_list_concat(&tmp_pplist, &pp);
3648 3648 while (--ppages != 0) {
3649 3649 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3650 3650 pp++;
3651 3651 ASSERT(PAGE_EXCL(pp));
3652 3652 ASSERT(pp->p_szc == szc);
3653 3653 page_list_concat(&tmp_pplist, &pp);
3654 3654 }
3655 3655 }
3656 3656 if (tmp_pplist != NULL) {
3657 3657 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3658 3658 page_list_concat(&tmp_pplist, &pplist);
3659 3659 pplist = tmp_pplist;
3660 3660 }
3661 3661 /*
3662 3662 * At this point all pages are either on done_pplist or
3663 3663 * pplist. They can't all be on done_pplist, otherwise
3664 3664 * we'd have been done.
3665 3665 */
3666 3666 ASSERT(pplist != NULL);
3667 3667 if (nios != 0) {
3668 3668 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3669 3669 pp = pplist;
3670 3670 do {
3671 3671 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3672 3672 ASSERT(pp->p_szc == szc);
3673 3673 ASSERT(PAGE_EXCL(pp));
3674 3674 ASSERT(pp->p_vnode != vp);
3675 3675 pp->p_szc = 0;
3676 3676 } while ((pp = pp->p_next) != pplist);
3677 3677
3678 3678 pp = done_pplist;
3679 3679 do {
3680 3680 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3681 3681 ASSERT(pp->p_szc == szc);
3682 3682 ASSERT(PAGE_EXCL(pp));
3683 3683 ASSERT(pp->p_vnode == vp);
3684 3684 pp->p_szc = 0;
3685 3685 } while ((pp = pp->p_next) != done_pplist);
3686 3686
3687 3687 while (pplist != NULL) {
3688 3688 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3689 3689 pp = pplist;
3690 3690 page_sub(&pplist, pp);
3691 3691 page_free(pp, 0);
3692 3692 }
3693 3693
3694 3694 while (done_pplist != NULL) {
3695 3695 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3696 3696 pp = done_pplist;
3697 3697 page_sub(&done_pplist, pp);
3698 3698 page_unlock(pp);
3699 3699 }
3700 3700 *ppplist = NULL;
3701 3701 return (0);
3702 3702 }
3703 3703 ASSERT(pplist == *ppplist);
3704 3704 if (io_err) {
3705 3705 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3706 3706 /*
3707 3707 * Don't downsize on an io error.
3708 3708 * See if VOP_GETPAGE() succeeds.
3709 3709 * pplist may still be used in this case
3710 3710 * for relocations.
3711 3711 */
3712 3712 return (0);
3713 3713 }
3714 3714 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3715 3715 page_free_replacement_page(pplist);
3716 3716 page_create_putback(pages);
3717 3717 *ppplist = NULL;
3718 3718 return (0);
3719 3719 }
3720 3720
3721 3721 int segvn_anypgsz = 0;
3722 3722
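/*
 * Drop the softlockcnt references taken for an F_SOFTLOCK fault when a large
 * page fault bails out before translations are loaded.
 */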
3723 3723 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3724 3724 if ((type) == F_SOFTLOCK) { \
3725 3725 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3726 3726 -(pages)); \
3727 3727 }
3728 3728
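/*
 * For VMODSORT vnodes: on a write fault mark every constituent page modified;
 * on a read fault through a writable mapping clear PROT_WRITE from prot if
 * any constituent page is not yet modified, so that a later write fault will
 * set its mod bit.
 */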
3729 3729 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3730 3730 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3731 3731 if ((rw) == S_WRITE) { \
3732 3732 for (i = 0; i < (pages); i++) { \
3733 3733 ASSERT((ppa)[i]->p_vnode == \
3734 3734 (ppa)[0]->p_vnode); \
3735 3735 hat_setmod((ppa)[i]); \
3736 3736 } \
3737 3737 } else if ((rw) != S_OTHER && \
3738 3738 ((prot) & (vpprot) & PROT_WRITE)) { \
3739 3739 for (i = 0; i < (pages); i++) { \
3740 3740 ASSERT((ppa)[i]->p_vnode == \
3741 3741 (ppa)[0]->p_vnode); \
3742 3742 if (!hat_ismod((ppa)[i])) { \
3743 3743 prot &= ~PROT_WRITE; \
3744 3744 break; \
3745 3745 } \
3746 3746 } \
3747 3747 } \
3748 3748 }
3749 3749
3750 3750 #ifdef VM_STATS
3751 3751
3752 3752 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3753 3753 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3754 3754
3755 3755 #else /* VM_STATS */
3756 3756
3757 3757 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3758 3758
3759 3759 #endif
3760 3760
3761 3761 static faultcode_t
3762 3762 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3763 3763 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3764 3764 caddr_t eaddr, int brkcow)
3765 3765 {
3766 3766 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3767 3767 struct anon_map *amp = svd->amp;
3768 3768 uchar_t segtype = svd->type;
3769 3769 uint_t szc = seg->s_szc;
3770 3770 size_t pgsz = page_get_pagesize(szc);
3771 3771 size_t maxpgsz = pgsz;
3772 3772 pgcnt_t pages = btop(pgsz);
3773 3773 pgcnt_t maxpages = pages;
3774 3774 size_t ppasize = (pages + 1) * sizeof (page_t *);
3775 3775 caddr_t a = lpgaddr;
3776 3776 caddr_t maxlpgeaddr = lpgeaddr;
3777 3777 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3778 3778 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3779 3779 struct vpage *vpage = (svd->vpage != NULL) ?
3780 3780 &svd->vpage[seg_page(seg, a)] : NULL;
3781 3781 vnode_t *vp = svd->vp;
3782 3782 page_t **ppa;
3783 3783 uint_t pszc;
3784 3784 size_t ppgsz;
3785 3785 pgcnt_t ppages;
3786 3786 faultcode_t err = 0;
3787 3787 int ierr;
3788 3788 int vop_size_err = 0;
3789 3789 uint_t protchk, prot, vpprot;
3790 3790 ulong_t i;
3791 3791 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3792 3792 anon_sync_obj_t an_cookie;
3793 3793 enum seg_rw arw;
3794 3794 int alloc_failed = 0;
3795 3795 int adjszc_chk;
3796 3796 struct vattr va;
3797 3797 int xhat = 0;
3798 3798 page_t *pplist;
3799 3799 pfn_t pfn;
3800 3800 int physcontig;
3801 3801 int upgrdfail;
3802 3802 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3803 3803 int tron = (svd->tr_state == SEGVN_TR_ON);
3804 3804
3805 3805 ASSERT(szc != 0);
3806 3806 ASSERT(vp != NULL);
3807 3807 ASSERT(brkcow == 0 || amp != NULL);
3808 3808 ASSERT(tron == 0 || amp != NULL);
3809 3809 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3810 3810 ASSERT(!(svd->flags & MAP_NORESERVE));
3811 3811 ASSERT(type != F_SOFTUNLOCK);
3812 3812 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3813 3813 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3814 3814 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3815 3815 ASSERT(seg->s_szc < NBBY * sizeof (int));
3816 3816 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3817 3817 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3818 3818
3819 3819 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3820 3820 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3821 3821
3822 3822 if (svd->flags & MAP_TEXT) {
3823 3823 hat_flag |= HAT_LOAD_TEXT;
3824 3824 }
3825 3825
3826 3826 if (svd->pageprot) {
3827 3827 switch (rw) {
3828 3828 case S_READ:
3829 3829 protchk = PROT_READ;
3830 3830 break;
3831 3831 case S_WRITE:
3832 3832 protchk = PROT_WRITE;
3833 3833 break;
3834 3834 case S_EXEC:
3835 3835 protchk = PROT_EXEC;
3836 3836 break;
3837 3837 case S_OTHER:
3838 3838 default:
3839 3839 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3840 3840 break;
3841 3841 }
3842 3842 } else {
3843 3843 prot = svd->prot;
3844 3844 /* caller has already done segment level protection check. */
3845 3845 }
3846 3846
3847 3847 if (seg->s_as->a_hat != hat) {
3848 3848 xhat = 1;
3849 3849 }
3850 3850
3851 3851 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3852 3852 SEGVN_VMSTAT_FLTVNPAGES(2);
3853 3853 arw = S_READ;
3854 3854 } else {
3855 3855 arw = rw;
3856 3856 }
3857 3857
3858 3858 ppa = kmem_alloc(ppasize, KM_SLEEP);
3859 3859
3860 3860 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3861 3861
3862 3862 for (;;) {
3863 3863 adjszc_chk = 0;
3864 3864 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
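/*
 * Once a full iteration has completed (adjszc_chk is set), try to step
 * the mapping size back up toward the segment size, as long as the
 * current address is aligned for the larger size and that size hasn't
 * already failed allocation.
 */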
3865 3865 if (adjszc_chk) {
3866 3866 while (szc < seg->s_szc) {
3867 3867 uintptr_t e;
3868 3868 uint_t tszc;
3869 3869 tszc = segvn_anypgsz_vnode ? szc + 1 :
3870 3870 seg->s_szc;
3871 3871 ppgsz = page_get_pagesize(tszc);
3872 3872 if (!IS_P2ALIGNED(a, ppgsz) ||
3873 3873 ((alloc_failed >> tszc) & 0x1)) {
3874 3874 break;
3875 3875 }
3876 3876 SEGVN_VMSTAT_FLTVNPAGES(4);
3877 3877 szc = tszc;
3878 3878 pgsz = ppgsz;
3879 3879 pages = btop(pgsz);
3880 3880 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3881 3881 lpgeaddr = (caddr_t)e;
3882 3882 }
3883 3883 }
3884 3884
3885 3885 again:
3886 3886 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3887 3887 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3888 3888 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
3889 3889 anon_array_enter(amp, aindx, &an_cookie);
3890 3890 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3891 3891 SEGVN_VMSTAT_FLTVNPAGES(5);
3892 3892 ASSERT(anon_pages(amp->ahp, aindx,
3893 3893 maxpages) == maxpages);
3894 3894 anon_array_exit(&an_cookie);
3895 3895 ANON_LOCK_EXIT(&->a_rwlock);
3896 3896 err = segvn_fault_anonpages(hat, seg,
3897 3897 a, a + maxpgsz, type, rw,
3898 3898 MAX(a, addr),
3899 3899 MIN(a + maxpgsz, eaddr), brkcow);
3900 3900 if (err != 0) {
3901 3901 SEGVN_VMSTAT_FLTVNPAGES(6);
3902 3902 goto out;
3903 3903 }
3904 3904 if (szc < seg->s_szc) {
3905 3905 szc = seg->s_szc;
3906 3906 pgsz = maxpgsz;
3907 3907 pages = maxpages;
3908 3908 lpgeaddr = maxlpgeaddr;
3909 3909 }
3910 3910 goto next;
3911 3911 } else {
3912 3912 ASSERT(anon_pages(amp->ahp, aindx,
3913 3913 maxpages) == 0);
3914 3914 SEGVN_VMSTAT_FLTVNPAGES(7);
3915 3915 anon_array_exit(&an_cookie);
3916 3916 ANON_LOCK_EXIT(&->a_rwlock);
3917 3917 }
3918 3918 }
3919 3919 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3920 3920 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3921 3921
3922 3922 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3923 3923 ASSERT(vpage != NULL);
3924 3924 prot = VPP_PROT(vpage);
3925 3925 ASSERT(sameprot(seg, a, maxpgsz));
3926 3926 if ((prot & protchk) == 0) {
3927 3927 SEGVN_VMSTAT_FLTVNPAGES(8);
3928 3928 err = FC_PROT;
3929 3929 goto out;
3930 3930 }
3931 3931 }
3932 3932 if (type == F_SOFTLOCK) {
3933 3933 atomic_add_long((ulong_t *)&svd->softlockcnt,
3934 3934 pages);
3935 3935 }
3936 3936
3937 3937 pplist = NULL;
3938 3938 physcontig = 0;
3939 3939 ppa[0] = NULL;
3940 3940 if (!brkcow && !tron && szc &&
3941 3941 !page_exists_physcontig(vp, off, szc,
3942 3942 segtype == MAP_PRIVATE ? ppa : NULL)) {
3943 3943 SEGVN_VMSTAT_FLTVNPAGES(9);
3944 3944 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
3945 3945 szc, 0, 0) && type != F_SOFTLOCK) {
3946 3946 SEGVN_VMSTAT_FLTVNPAGES(10);
3947 3947 pszc = 0;
3948 3948 ierr = -1;
3949 3949 alloc_failed |= (1 << szc);
3950 3950 break;
3951 3951 }
3952 3952 if (pplist != NULL &&
3953 3953 vp->v_mpssdata == SEGVN_PAGEIO) {
3954 3954 int downsize;
3955 3955 SEGVN_VMSTAT_FLTVNPAGES(11);
3956 3956 physcontig = segvn_fill_vp_pages(svd,
3957 3957 vp, off, szc, ppa, &pplist,
3958 3958 &pszc, &downsize);
3959 3959 ASSERT(!physcontig || pplist == NULL);
3960 3960 if (!physcontig && downsize &&
3961 3961 type != F_SOFTLOCK) {
3962 3962 ASSERT(pplist == NULL);
3963 3963 SEGVN_VMSTAT_FLTVNPAGES(12);
3964 3964 ierr = -1;
3965 3965 break;
3966 3966 }
3967 3967 ASSERT(!physcontig ||
3968 3968 segtype == MAP_PRIVATE ||
3969 3969 ppa[0] == NULL);
3970 3970 if (physcontig && ppa[0] == NULL) {
3971 3971 physcontig = 0;
3972 3972 }
3973 3973 }
3974 3974 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
3975 3975 SEGVN_VMSTAT_FLTVNPAGES(13);
3976 3976 ASSERT(segtype == MAP_PRIVATE);
3977 3977 physcontig = 1;
3978 3978 }
3979 3979
3980 3980 if (!physcontig) {
3981 3981 SEGVN_VMSTAT_FLTVNPAGES(14);
3982 3982 ppa[0] = NULL;
3983 3983 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
3984 3984 &vpprot, ppa, pgsz, seg, a, arw,
3985 3985 svd->cred, NULL);
3986 3986 #ifdef DEBUG
3987 3987 if (ierr == 0) {
3988 3988 for (i = 0; i < pages; i++) {
3989 3989 ASSERT(PAGE_LOCKED(ppa[i]));
3990 3990 ASSERT(!PP_ISFREE(ppa[i]));
3991 3991 ASSERT(ppa[i]->p_vnode == vp);
3992 3992 ASSERT(ppa[i]->p_offset ==
3993 3993 off + (i << PAGESHIFT));
3994 3994 }
3995 3995 }
3996 3996 #endif /* DEBUG */
3997 3997 if (segtype == MAP_PRIVATE) {
3998 3998 SEGVN_VMSTAT_FLTVNPAGES(15);
3999 3999 vpprot &= ~PROT_WRITE;
4000 4000 }
4001 4001 } else {
4002 4002 ASSERT(segtype == MAP_PRIVATE);
4003 4003 SEGVN_VMSTAT_FLTVNPAGES(16);
4004 4004 vpprot = PROT_ALL & ~PROT_WRITE;
4005 4005 ierr = 0;
4006 4006 }
4007 4007
4008 4008 if (ierr != 0) {
4009 4009 SEGVN_VMSTAT_FLTVNPAGES(17);
4010 4010 if (pplist != NULL) {
4011 4011 SEGVN_VMSTAT_FLTVNPAGES(18);
4012 4012 page_free_replacement_page(pplist);
4013 4013 page_create_putback(pages);
4014 4014 }
4015 4015 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4016 4016 if (a + pgsz <= eaddr) {
4017 4017 SEGVN_VMSTAT_FLTVNPAGES(19);
4018 4018 err = FC_MAKE_ERR(ierr);
4019 4019 goto out;
4020 4020 }
4021 4021 va.va_mask = AT_SIZE;
4022 4022 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4023 4023 SEGVN_VMSTAT_FLTVNPAGES(20);
4024 4024 err = FC_MAKE_ERR(EIO);
4025 4025 goto out;
4026 4026 }
4027 4027 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4028 4028 SEGVN_VMSTAT_FLTVNPAGES(21);
4029 4029 err = FC_MAKE_ERR(ierr);
4030 4030 goto out;
4031 4031 }
4032 4032 if (btopr(va.va_size) <
4033 4033 btopr(off + (eaddr - a))) {
4034 4034 SEGVN_VMSTAT_FLTVNPAGES(22);
4035 4035 err = FC_MAKE_ERR(ierr);
4036 4036 goto out;
4037 4037 }
4038 4038 if (brkcow || tron || type == F_SOFTLOCK) {
4039 4039 /* can't reduce map area */
4040 4040 SEGVN_VMSTAT_FLTVNPAGES(23);
4041 4041 vop_size_err = 1;
4042 4042 goto out;
4043 4043 }
4044 4044 SEGVN_VMSTAT_FLTVNPAGES(24);
4045 4045 ASSERT(szc != 0);
4046 4046 pszc = 0;
4047 4047 ierr = -1;
4048 4048 break;
4049 4049 }
4050 4050
4051 4051 if (amp != NULL) {
4052 4052 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4053 4053 anon_array_enter(amp, aindx, &an_cookie);
4054 4054 }
4055 4055 if (amp != NULL &&
4056 4056 anon_get_ptr(amp->ahp, aindx) != NULL) {
4057 4057 ulong_t taindx = P2ALIGN(aindx, maxpages);
4058 4058
4059 4059 SEGVN_VMSTAT_FLTVNPAGES(25);
4060 4060 ASSERT(anon_pages(amp->ahp, taindx,
4061 4061 maxpages) == maxpages);
4062 4062 for (i = 0; i < pages; i++) {
4063 4063 page_unlock(ppa[i]);
4064 4064 }
4065 4065 anon_array_exit(&an_cookie);
4066 4066 ANON_LOCK_EXIT(&->a_rwlock);
4067 4067 if (pplist != NULL) {
4068 4068 page_free_replacement_page(pplist);
4069 4069 page_create_putback(pages);
4070 4070 }
4071 4071 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4072 4072 if (szc < seg->s_szc) {
4073 4073 SEGVN_VMSTAT_FLTVNPAGES(26);
4074 4074 /*
4075 4075 * For private segments SOFTLOCK
4076 4076 * either always breaks cow (any rw
4077 4077 * type except S_READ_NOCOW) or
4078 4078 * address space is locked as writer
4079 4079 * (S_READ_NOCOW case) and anon slots
4080 4080 * can't show up on second check.
4081 4081 * Therefore if we are here for
4082 4082 * SOFTLOCK case it must be a cow
4083 4083 * break but cow break never reduces
4084 4084 * szc. Text replication (tron) in
4085 4085 * this case works as a cow break.
4086 4086 * Thus the assert below.
4087 4087 */
4088 4088 ASSERT(!brkcow && !tron &&
4089 4089 type != F_SOFTLOCK);
4090 4090 pszc = seg->s_szc;
4091 4091 ierr = -2;
4092 4092 break;
4093 4093 }
4094 4094 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4095 4095 goto again;
4096 4096 }
4097 4097 #ifdef DEBUG
4098 4098 if (amp != NULL) {
4099 4099 ulong_t taindx = P2ALIGN(aindx, maxpages);
4100 4100 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4101 4101 }
4102 4102 #endif /* DEBUG */
4103 4103
4104 4104 if (brkcow || tron) {
4105 4105 ASSERT(amp != NULL);
4106 4106 ASSERT(pplist == NULL);
4107 4107 ASSERT(szc == seg->s_szc);
4108 4108 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4109 4109 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4110 4110 SEGVN_VMSTAT_FLTVNPAGES(27);
4111 4111 ierr = anon_map_privatepages(amp, aindx, szc,
4112 4112 seg, a, prot, ppa, vpage, segvn_anypgsz,
4113 4113 tron ? PG_LOCAL : 0, svd->cred);
4114 4114 if (ierr != 0) {
4115 4115 SEGVN_VMSTAT_FLTVNPAGES(28);
4116 4116 anon_array_exit(&an_cookie);
4117 4117 ANON_LOCK_EXIT(&->a_rwlock);
4118 4118 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4119 4119 err = FC_MAKE_ERR(ierr);
4120 4120 goto out;
4121 4121 }
4122 4122
4123 4123 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4124 4124 /*
4125 4125 * p_szc can't be changed for locked
4126 4126 * swapfs pages.
4127 4127 */
4128 4128 ASSERT(svd->rcookie ==
4129 4129 HAT_INVALID_REGION_COOKIE);
4130 4130 hat_memload_array(hat, a, pgsz, ppa, prot,
4131 4131 hat_flag);
4132 4132
4133 4133 if (!(hat_flag & HAT_LOAD_LOCK)) {
4134 4134 SEGVN_VMSTAT_FLTVNPAGES(29);
4135 4135 for (i = 0; i < pages; i++) {
4136 4136 page_unlock(ppa[i]);
4137 4137 }
4138 4138 }
4139 4139 anon_array_exit(&an_cookie);
4140 4140 ANON_LOCK_EXIT(&->a_rwlock);
4141 4141 goto next;
4142 4142 }
4143 4143
4144 4144 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4145 4145 (!svd->pageprot && svd->prot == (prot & vpprot)));
4146 4146
4147 4147 pfn = page_pptonum(ppa[0]);
4148 4148 /*
4149 4149 * hat_page_demote() needs an SE_EXCL lock on one of the
4150 4150 * constituent page_t's and it decreases the root's p_szc
4151 4151 * last. This means that if the root's p_szc is equal to szc and
4152 4152 * all its constituent pages are locked, a
4153 4153 * hat_page_demote() that could have changed p_szc to
4154 4154 * szc is already done and no new hat_page_demote()
4155 4155 * can start for this large page.
4156 4156 */
4157 4157
4158 4158 /*
4159 4159 * We need to make sure the same mapping size is used for
4160 4160 * the same address range if there's a possibility the
4161 4161 * address is already mapped, because the hat layer panics
4162 4162 * when a translation is loaded for a range already
4163 4163 * mapped with a different page size. We achieve this
4164 4164 * by always using the largest page size possible subject
4165 4165 * to the constraints of page size, segment page size
4166 4166 * and page alignment. Since mappings are invalidated
4167 4167 * when those constraints change, making it
4168 4168 * impossible to use a previously used mapping size, no
4169 4169 * mapping size conflicts should happen.
4170 4170 */
4171 4171
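/*
 * Dispatch on the current p_szc of ppa[0]: if it already equals szc and
 * the pfn is aligned, load the large mapping as is; if it is larger,
 * consider sizing up or lock the szc and load; if it is smaller, try to
 * promote the pages with segvn_full_szcpages() or fall back to small
 * mappings.
 */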
4172 4172 chkszc:
4173 4173 if ((pszc = ppa[0]->p_szc) == szc &&
4174 4174 IS_P2ALIGNED(pfn, pages)) {
4175 4175
4176 4176 SEGVN_VMSTAT_FLTVNPAGES(30);
4177 4177 #ifdef DEBUG
4178 4178 for (i = 0; i < pages; i++) {
4179 4179 ASSERT(PAGE_LOCKED(ppa[i]));
4180 4180 ASSERT(!PP_ISFREE(ppa[i]));
4181 4181 ASSERT(page_pptonum(ppa[i]) ==
4182 4182 pfn + i);
4183 4183 ASSERT(ppa[i]->p_szc == szc);
4184 4184 ASSERT(ppa[i]->p_vnode == vp);
4185 4185 ASSERT(ppa[i]->p_offset ==
4186 4186 off + (i << PAGESHIFT));
4187 4187 }
4188 4188 #endif /* DEBUG */
4189 4189 /*
4190 4190 * All pages are of the szc we need and they are
4191 4191 * all locked so they can't change szc. Load the
4192 4192 * translations.
4193 4193 *
4194 4194 * If the page got promoted since the last check
4195 4195 * we don't need pplist.
4196 4196 */
4197 4197 if (pplist != NULL) {
4198 4198 page_free_replacement_page(pplist);
4199 4199 page_create_putback(pages);
4200 4200 }
4201 4201 if (PP_ISMIGRATE(ppa[0])) {
4202 4202 page_migrate(seg, a, ppa, pages);
4203 4203 }
4204 4204 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4205 4205 prot, vpprot);
4206 4206 if (!xhat) {
4207 4207 hat_memload_array_region(hat, a, pgsz,
4208 4208 ppa, prot & vpprot, hat_flag,
4209 4209 svd->rcookie);
4210 4210 } else {
4211 4211 /*
4212 4212 * avoid large xhat mappings to FS
4213 4213 * pages so that hat_page_demote()
4214 4214 * doesn't need to check for xhat
4215 4215 * large mappings.
4216 4216 * Don't use regions with xhats.
4217 4217 */
4218 4218 for (i = 0; i < pages; i++) {
4219 4219 hat_memload(hat,
4220 4220 a + (i << PAGESHIFT),
4221 4221 ppa[i], prot & vpprot,
4222 4222 hat_flag);
4223 4223 }
4224 4224 }
4225 4225
4226 4226 if (!(hat_flag & HAT_LOAD_LOCK)) {
4227 4227 for (i = 0; i < pages; i++) {
4228 4228 page_unlock(ppa[i]);
4229 4229 }
4230 4230 }
4231 4231 if (amp != NULL) {
4232 4232 anon_array_exit(&an_cookie);
4233 4233 ANON_LOCK_EXIT(&->a_rwlock);
4234 4234 }
4235 4235 goto next;
4236 4236 }
4237 4237
4238 4238 /*
4239 4239 * See if upsize is possible.
4240 4240 */
4241 4241 if (pszc > szc && szc < seg->s_szc &&
4242 4242 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4243 4243 pgcnt_t aphase;
4244 4244 uint_t pszc1 = MIN(pszc, seg->s_szc);
4245 4245 ppgsz = page_get_pagesize(pszc1);
4246 4246 ppages = btop(ppgsz);
4247 4247 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4248 4248
4249 4249 ASSERT(type != F_SOFTLOCK);
4250 4250
4251 4251 SEGVN_VMSTAT_FLTVNPAGES(31);
4252 4252 if (aphase != P2PHASE(pfn, ppages)) {
4253 4253 segvn_faultvnmpss_align_err4++;
4254 4254 } else {
4255 4255 SEGVN_VMSTAT_FLTVNPAGES(32);
4256 4256 if (pplist != NULL) {
4257 4257 page_t *pl = pplist;
4258 4258 page_free_replacement_page(pl);
4259 4259 page_create_putback(pages);
4260 4260 }
4261 4261 for (i = 0; i < pages; i++) {
4262 4262 page_unlock(ppa[i]);
4263 4263 }
4264 4264 if (amp != NULL) {
4265 4265 anon_array_exit(&an_cookie);
4266 4266 ANON_LOCK_EXIT(&->a_rwlock);
4267 4267 }
4268 4268 pszc = pszc1;
4269 4269 ierr = -2;
4270 4270 break;
4271 4271 }
4272 4272 }
4273 4273
4274 4274 /*
4275 4275 * Check if we should use the smallest mapping size.
4276 4276 */
4277 4277 upgrdfail = 0;
4278 4278 if (szc == 0 || xhat ||
4279 4279 (pszc >= szc &&
4280 4280 !IS_P2ALIGNED(pfn, pages)) ||
4281 4281 (pszc < szc &&
4282 4282 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4283 4283 &pszc))) {
4284 4284
4285 4285 if (upgrdfail && type != F_SOFTLOCK) {
4286 4286 /*
4287 4287 * segvn_full_szcpages failed to lock
4288 4288 * all pages EXCL. Size down.
4289 4289 */
4290 4290 ASSERT(pszc < szc);
4291 4291
4292 4292 SEGVN_VMSTAT_FLTVNPAGES(33);
4293 4293
4294 4294 if (pplist != NULL) {
4295 4295 page_t *pl = pplist;
4296 4296 page_free_replacement_page(pl);
4297 4297 page_create_putback(pages);
4298 4298 }
4299 4299
4300 4300 for (i = 0; i < pages; i++) {
4301 4301 page_unlock(ppa[i]);
4302 4302 }
4303 4303 if (amp != NULL) {
4304 4304 anon_array_exit(&an_cookie);
4305 4305 ANON_LOCK_EXIT(&->a_rwlock);
4306 4306 }
4307 4307 ierr = -1;
4308 4308 break;
4309 4309 }
4310 4310 if (szc != 0 && !xhat && !upgrdfail) {
4311 4311 segvn_faultvnmpss_align_err5++;
4312 4312 }
4313 4313 SEGVN_VMSTAT_FLTVNPAGES(34);
4314 4314 if (pplist != NULL) {
4315 4315 page_free_replacement_page(pplist);
4316 4316 page_create_putback(pages);
4317 4317 }
4318 4318 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4319 4319 prot, vpprot);
4320 4320 if (upgrdfail && segvn_anypgsz_vnode) {
4321 4321 /* SOFTLOCK case */
4322 4322 hat_memload_array_region(hat, a, pgsz,
4323 4323 ppa, prot & vpprot, hat_flag,
4324 4324 svd->rcookie);
4325 4325 } else {
4326 4326 for (i = 0; i < pages; i++) {
4327 4327 hat_memload_region(hat,
4328 4328 a + (i << PAGESHIFT),
4329 4329 ppa[i], prot & vpprot,
4330 4330 hat_flag, svd->rcookie);
4331 4331 }
4332 4332 }
4333 4333 if (!(hat_flag & HAT_LOAD_LOCK)) {
4334 4334 for (i = 0; i < pages; i++) {
4335 4335 page_unlock(ppa[i]);
4336 4336 }
4337 4337 }
4338 4338 if (amp != NULL) {
4339 4339 anon_array_exit(&an_cookie);
4340 4340 ANON_LOCK_EXIT(&->a_rwlock);
4341 4341 }
4342 4342 goto next;
4343 4343 }
4344 4344
4345 4345 if (pszc == szc) {
4346 4346 /*
4347 4347 * segvn_full_szcpages() upgraded pages szc.
4348 4348 */
4349 4349 ASSERT(pszc == ppa[0]->p_szc);
4350 4350 ASSERT(IS_P2ALIGNED(pfn, pages));
4351 4351 goto chkszc;
4352 4352 }
4353 4353
4354 4354 if (pszc > szc) {
4355 4355 kmutex_t *szcmtx;
4356 4356 SEGVN_VMSTAT_FLTVNPAGES(35);
4357 4357 /*
4358 4358 * p_szc of ppa[0] can change since we haven't
4359 4359 * locked all the constituent pages. Call
4360 4360 * page_szc_lock() to prevent szc changes.
4361 4361 * This should be a rare case that happens when
4362 4362 * multiple segments use different page sizes
4363 4363 * to map the same file offsets.
4364 4364 */
4365 4365 szcmtx = page_szc_lock(ppa[0]);
4366 4366 pszc = ppa[0]->p_szc;
4367 4367 ASSERT(szcmtx != NULL || pszc == 0);
4368 4368 ASSERT(ppa[0]->p_szc <= pszc);
4369 4369 if (pszc <= szc) {
4370 4370 SEGVN_VMSTAT_FLTVNPAGES(36);
4371 4371 if (szcmtx != NULL) {
4372 4372 mutex_exit(szcmtx);
4373 4373 }
4374 4374 goto chkszc;
4375 4375 }
4376 4376 if (pplist != NULL) {
4377 4377 /*
4378 4378 * The page got promoted since the last check.
4379 4379 * We don't need the preallocated large
4380 4380 * page.
4381 4381 */
4382 4382 SEGVN_VMSTAT_FLTVNPAGES(37);
4383 4383 page_free_replacement_page(pplist);
4384 4384 page_create_putback(pages);
4385 4385 }
4386 4386 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4387 4387 prot, vpprot);
4388 4388 hat_memload_array_region(hat, a, pgsz, ppa,
4389 4389 prot & vpprot, hat_flag, svd->rcookie);
4390 4390 mutex_exit(szcmtx);
4391 4391 if (!(hat_flag & HAT_LOAD_LOCK)) {
4392 4392 for (i = 0; i < pages; i++) {
4393 4393 page_unlock(ppa[i]);
4394 4394 }
4395 4395 }
4396 4396 if (amp != NULL) {
4397 4397 anon_array_exit(&an_cookie);
4398 4398 ANON_LOCK_EXIT(&->a_rwlock);
4399 4399 }
4400 4400 goto next;
4401 4401 }
4402 4402
4403 4403 /*
4404 4404 * If the page got demoted since the last check
4405 4405 * we may not have allocated a large page.
4406 4406 * Allocate one now.
4407 4407 */
4408 4408 if (pplist == NULL &&
4409 4409 page_alloc_pages(vp, seg, a, &pplist, NULL,
4410 4410 szc, 0, 0) && type != F_SOFTLOCK) {
4411 4411 SEGVN_VMSTAT_FLTVNPAGES(38);
4412 4412 for (i = 0; i < pages; i++) {
4413 4413 page_unlock(ppa[i]);
4414 4414 }
4415 4415 if (amp != NULL) {
4416 4416 anon_array_exit(&an_cookie);
4417 4417 ANON_LOCK_EXIT(&->a_rwlock);
4418 4418 }
4419 4419 ierr = -1;
4420 4420 alloc_failed |= (1 << szc);
4421 4421 break;
4422 4422 }
4423 4423
4424 4424 SEGVN_VMSTAT_FLTVNPAGES(39);
4425 4425
4426 4426 if (pplist != NULL) {
4427 4427 segvn_relocate_pages(ppa, pplist);
4428 4428 #ifdef DEBUG
4429 4429 } else {
4430 4430 ASSERT(type == F_SOFTLOCK);
4431 4431 SEGVN_VMSTAT_FLTVNPAGES(40);
4432 4432 #endif /* DEBUG */
4433 4433 }
4434 4434
4435 4435 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4436 4436
4437 4437 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4438 4438 ASSERT(type == F_SOFTLOCK);
4439 4439 for (i = 0; i < pages; i++) {
4440 4440 ASSERT(ppa[i]->p_szc < szc);
4441 4441 hat_memload_region(hat,
4442 4442 a + (i << PAGESHIFT),
4443 4443 ppa[i], prot & vpprot, hat_flag,
4444 4444 svd->rcookie);
4445 4445 }
4446 4446 } else {
4447 4447 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4448 4448 hat_memload_array_region(hat, a, pgsz, ppa,
4449 4449 prot & vpprot, hat_flag, svd->rcookie);
4450 4450 }
4451 4451 if (!(hat_flag & HAT_LOAD_LOCK)) {
4452 4452 for (i = 0; i < pages; i++) {
4453 4453 ASSERT(PAGE_SHARED(ppa[i]));
4454 4454 page_unlock(ppa[i]);
4455 4455 }
4456 4456 }
4457 4457 if (amp != NULL) {
4458 4458 anon_array_exit(&an_cookie);
4459 4459 ANON_LOCK_EXIT(&->a_rwlock);
4460 4460 }
4461 4461
4462 4462 next:
4463 4463 if (vpage != NULL) {
4464 4464 vpage += pages;
4465 4465 }
4466 4466 adjszc_chk = 1;
4467 4467 }
4468 4468 if (a == lpgeaddr)
4469 4469 break;
4470 4470 ASSERT(a < lpgeaddr);
4471 4471
4472 4472 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4473 4473
4474 4474 /*
4475 4475 * ierr == -1 means we failed to map with a large page
4476 4476 * (either due to allocation/relocation failures or
4477 4477 * misalignment with other mappings to this file).
4478 4478 *
4479 4479 * ierr == -2 means some other thread allocated a large page
4480 4480 * after we gave up on mapping with a large page. Retry with a
4481 4481 * larger mapping.
4482 4482 */
4483 4483 ASSERT(ierr == -1 || ierr == -2);
4484 4484 ASSERT(ierr == -2 || szc != 0);
4485 4485 ASSERT(ierr == -1 || szc < seg->s_szc);
4486 4486 if (ierr == -2) {
4487 4487 SEGVN_VMSTAT_FLTVNPAGES(41);
4488 4488 ASSERT(pszc > szc && pszc <= seg->s_szc);
4489 4489 szc = pszc;
4490 4490 } else if (segvn_anypgsz_vnode) {
4491 4491 SEGVN_VMSTAT_FLTVNPAGES(42);
4492 4492 szc--;
4493 4493 } else {
4494 4494 SEGVN_VMSTAT_FLTVNPAGES(43);
4495 4495 ASSERT(pszc < szc);
4496 4496 /*
4497 4497 * Another process created a pszc large page,
4498 4498 * but we still have to drop to szc 0.
4499 4499 */
4500 4500 szc = 0;
4501 4501 }
4502 4502
4503 4503 pgsz = page_get_pagesize(szc);
4504 4504 pages = btop(pgsz);
4505 4505 if (ierr == -2) {
4506 4506 /*
4507 4507 * Size up case. Note lpgaddr may only be needed for
4508 4508 * softlock case so we don't adjust it here.
4509 4509 */
4510 4510 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4511 4511 ASSERT(a >= lpgaddr);
4512 4512 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4513 4513 off = svd->offset + (uintptr_t)(a - seg->s_base);
4514 4514 aindx = svd->anon_index + seg_page(seg, a);
4515 4515 vpage = (svd->vpage != NULL) ?
4516 4516 &svd->vpage[seg_page(seg, a)] : NULL;
4517 4517 } else {
4518 4518 /*
4519 4519 * Size down case. Note lpgaddr may only be needed for
4520 4520 * softlock case so we don't adjust it here.
4521 4521 */
4522 4522 ASSERT(IS_P2ALIGNED(a, pgsz));
4523 4523 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4524 4524 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4525 4525 ASSERT(a < lpgeaddr);
4526 4526 if (a < addr) {
4527 4527 SEGVN_VMSTAT_FLTVNPAGES(44);
4528 4528 /*
4529 4529 * The beginning of the large page region can
4530 4530 * be pulled to the right to make a smaller
4531 4531 * region. We haven't yet faulted a single
4532 4532 * page.
4533 4533 */
4534 4534 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4535 4535 ASSERT(a >= lpgaddr);
4536 4536 off = svd->offset +
4537 4537 (uintptr_t)(a - seg->s_base);
4538 4538 aindx = svd->anon_index + seg_page(seg, a);
4539 4539 vpage = (svd->vpage != NULL) ?
4540 4540 &svd->vpage[seg_page(seg, a)] : NULL;
4541 4541 }
4542 4542 }
4543 4543 }
4544 4544 out:
4545 4545 kmem_free(ppa, ppasize);
4546 4546 if (!err && !vop_size_err) {
4547 4547 SEGVN_VMSTAT_FLTVNPAGES(45);
4548 4548 return (0);
4549 4549 }
4550 4550 if (type == F_SOFTLOCK && a > lpgaddr) {
4551 4551 SEGVN_VMSTAT_FLTVNPAGES(46);
4552 4552 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4553 4553 }
4554 4554 if (!vop_size_err) {
4555 4555 SEGVN_VMSTAT_FLTVNPAGES(47);
4556 4556 return (err);
4557 4557 }
4558 4558 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4559 4559 /*
4560 4560 * The large page end is mapped beyond the end of the file and this is a cow
4561 4561 * fault (possibly a text replication induced cow) or a softlock, so we can't
4562 4562 * reduce the map area. For now just demote the segment. This should
4563 4563 * really only happen if the end of the file changed after the mapping
4564 4564 * was established since when large page segments are created we make
4565 4565 * sure they don't extend beyond the end of the file.
4566 4566 */
4567 4567 SEGVN_VMSTAT_FLTVNPAGES(48);
4568 4568
4569 4569 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4570 4570 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4571 4571 err = 0;
4572 4572 if (seg->s_szc != 0) {
4573 4573 segvn_fltvnpages_clrszc_cnt++;
4574 4574 ASSERT(svd->softlockcnt == 0);
4575 4575 err = segvn_clrszc(seg);
4576 4576 if (err != 0) {
4577 4577 segvn_fltvnpages_clrszc_err++;
4578 4578 }
4579 4579 }
4580 4580 ASSERT(err || seg->s_szc == 0);
4581 4581 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4582 4582 /* segvn_fault will do its job as if szc had been zero to begin with */
4583 4583 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4584 4584 }
4585 4585
4586 4586 /*
4587 4587 * This routine will attempt to fault in one large page.
4588 4588 * It will use smaller pages if that fails.
4589 4589 * It should only be called for pure anonymous segments.
4590 4590 */
4591 4591 static faultcode_t
4592 4592 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4593 4593 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4594 4594 caddr_t eaddr, int brkcow)
4595 4595 {
4596 4596 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4597 4597 struct anon_map *amp = svd->amp;
4598 4598 uchar_t segtype = svd->type;
4599 4599 uint_t szc = seg->s_szc;
4600 4600 size_t pgsz = page_get_pagesize(szc);
4601 4601 size_t maxpgsz = pgsz;
4602 4602 pgcnt_t pages = btop(pgsz);
4603 4603 uint_t ppaszc = szc;
4604 4604 caddr_t a = lpgaddr;
4605 4605 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4606 4606 struct vpage *vpage = (svd->vpage != NULL) ?
4607 4607 &svd->vpage[seg_page(seg, a)] : NULL;
4608 4608 page_t **ppa;
4609 4609 uint_t ppa_szc;
4610 4610 faultcode_t err;
4611 4611 int ierr;
4612 4612 uint_t protchk, prot, vpprot;
4613 4613 ulong_t i;
4614 4614 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4615 4615 anon_sync_obj_t cookie;
4616 4616 int adjszc_chk;
4617 4617 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4618 4618
4619 4619 ASSERT(szc != 0);
4620 4620 ASSERT(amp != NULL);
4621 4621 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4622 4622 ASSERT(!(svd->flags & MAP_NORESERVE));
4623 4623 ASSERT(type != F_SOFTUNLOCK);
4624 4624 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4625 4625 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4626 4626 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4627 4627
4628 4628 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4629 4629
4630 4630 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4631 4631 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4632 4632
4633 4633 if (svd->flags & MAP_TEXT) {
4634 4634 hat_flag |= HAT_LOAD_TEXT;
4635 4635 }
4636 4636
4637 4637 if (svd->pageprot) {
4638 4638 switch (rw) {
4639 4639 case S_READ:
4640 4640 protchk = PROT_READ;
4641 4641 break;
4642 4642 case S_WRITE:
4643 4643 protchk = PROT_WRITE;
4644 4644 break;
4645 4645 case S_EXEC:
4646 4646 protchk = PROT_EXEC;
4647 4647 break;
4648 4648 case S_OTHER:
4649 4649 default:
4650 4650 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4651 4651 break;
4652 4652 }
4653 4653 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4654 4654 } else {
4655 4655 prot = svd->prot;
4656 4656 /* caller has already done segment level protection check. */
4657 4657 }
4658 4658
4659 4659 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4660 4660 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4661 4661 for (;;) {
4662 4662 adjszc_chk = 0;
4663 4663 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4664 4664 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4665 4665 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4666 4666 ASSERT(vpage != NULL);
4667 4667 prot = VPP_PROT(vpage);
4668 4668 ASSERT(sameprot(seg, a, maxpgsz));
4669 4669 if ((prot & protchk) == 0) {
4670 4670 err = FC_PROT;
4671 4671 goto error;
4672 4672 }
4673 4673 }
4674 4674 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4675 4675 pgsz < maxpgsz) {
4676 4676 ASSERT(a > lpgaddr);
4677 4677 szc = seg->s_szc;
4678 4678 pgsz = maxpgsz;
4679 4679 pages = btop(pgsz);
4680 4680 ASSERT(IS_P2ALIGNED(aindx, pages));
4681 4681 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4682 4682 pgsz);
4683 4683 }
4684 4684 if (type == F_SOFTLOCK) {
4685 4685 atomic_add_long((ulong_t *)&svd->softlockcnt,
4686 4686 pages);
4687 4687 }
4688 4688 anon_array_enter(amp, aindx, &cookie);
4689 4689 ppa_szc = (uint_t)-1;
4690 4690 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4691 4691 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4692 4692 segvn_anypgsz, pgflags, svd->cred);
4693 4693 if (ierr != 0) {
4694 4694 anon_array_exit(&cookie);
4695 4695 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4696 4696 if (type == F_SOFTLOCK) {
4697 4697 atomic_add_long(
4698 4698 (ulong_t *)&svd->softlockcnt,
4699 4699 -pages);
4700 4700 }
4701 4701 if (ierr > 0) {
4702 4702 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4703 4703 err = FC_MAKE_ERR(ierr);
4704 4704 goto error;
4705 4705 }
4706 4706 break;
4707 4707 }
4708 4708
4709 4709 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4710 4710
4711 4711 ASSERT(segtype == MAP_SHARED ||
4712 4712 ppa[0]->p_szc <= szc);
4713 4713 ASSERT(segtype == MAP_PRIVATE ||
4714 4714 ppa[0]->p_szc >= szc);
4715 4715
4716 4716 /*
4717 4717 * Handle pages that have been marked for migration
4718 4718 */
4719 4719 if (lgrp_optimizations())
4720 4720 page_migrate(seg, a, ppa, pages);
4721 4721
4722 4722 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4723 4723
4724 4724 if (segtype == MAP_SHARED) {
4725 4725 vpprot |= PROT_WRITE;
4726 4726 }
4727 4727
4728 4728 hat_memload_array(hat, a, pgsz, ppa,
4729 4729 prot & vpprot, hat_flag);
4730 4730
4731 4731 if (hat_flag & HAT_LOAD_LOCK) {
4732 4732 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4733 4733 } else {
4734 4734 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4735 4735 for (i = 0; i < pages; i++)
4736 4736 page_unlock(ppa[i]);
4737 4737 }
4738 4738 if (vpage != NULL)
4739 4739 vpage += pages;
4740 4740
4741 4741 anon_array_exit(&cookie);
4742 4742 adjszc_chk = 1;
4743 4743 }
4744 4744 if (a == lpgeaddr)
4745 4745 break;
4746 4746 ASSERT(a < lpgeaddr);
4747 4747 /*
4748 4748 * ierr == -1 means we failed to allocate a large page.
4749 4749 * so do a size down operation.
4750 4750 *
4751 4751 * ierr == -2 means some other process that privately shares
4752 4752 * pages with this process has allocated a larger page and we
4753 4753 * need to retry with larger pages. So do a size up
4754 4754 * operation. This relies on the fact that large pages are
4755 4755 * never partially shared i.e. if we share any constituent
4756 4756 * page of a large page with another process we must share the
4757 4757 * entire large page. Note this cannot happen for SOFTLOCK
4758 4758 * case, unless current address (a) is at the beginning of the
4759 4759 * next page size boundary because the other process couldn't
4760 4760 * have relocated locked pages.
4761 4761 */
4762 4762 ASSERT(ierr == -1 || ierr == -2);
4763 4763
4764 4764 if (segvn_anypgsz) {
4765 4765 ASSERT(ierr == -2 || szc != 0);
4766 4766 ASSERT(ierr == -1 || szc < seg->s_szc);
4767 4767 szc = (ierr == -1) ? szc - 1 : szc + 1;
4768 4768 } else {
4769 4769 /*
4770 4770 * For non COW faults and segvn_anypgsz == 0
4771 4771 * we need to be careful not to loop forever
4772 4772 * if existing page is found with szc other
4773 4773 * than 0 or seg->s_szc. This could be due
4774 4774 * to page relocations on behalf of DR or
4775 4775 * more likely large page creation. For this
4776 4776 * case simply re-size to existing page's szc
4777 4777 * if returned by anon_map_getpages().
4778 4778 */
4779 4779 if (ppa_szc == (uint_t)-1) {
4780 4780 szc = (ierr == -1) ? 0 : seg->s_szc;
4781 4781 } else {
4782 4782 ASSERT(ppa_szc <= seg->s_szc);
4783 4783 ASSERT(ierr == -2 || ppa_szc < szc);
4784 4784 ASSERT(ierr == -1 || ppa_szc > szc);
4785 4785 szc = ppa_szc;
4786 4786 }
4787 4787 }
4788 4788
4789 4789 pgsz = page_get_pagesize(szc);
4790 4790 pages = btop(pgsz);
4791 4791 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4792 4792 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4793 4793 if (type == F_SOFTLOCK) {
4794 4794 /*
4795 4795 * For softlocks we cannot reduce the fault area
4796 4796 * (calculated based on the largest page size for this
4797 4797 * segment) for a size down, and a is already aligned to the next
4798 4798 * page size, as asserted above, for size
4799 4799 * ups. Therefore just continue in the softlock case.
4800 4800 */
4801 4801 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4802 4802 continue; /* keep lint happy */
4803 4803 } else if (ierr == -2) {
4804 4804
4805 4805 /*
4806 4806 * Size up case. Note lpgaddr may only be needed for
4807 4807 * softlock case so we don't adjust it here.
4808 4808 */
4809 4809 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4810 4810 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4811 4811 ASSERT(a >= lpgaddr);
4812 4812 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4813 4813 aindx = svd->anon_index + seg_page(seg, a);
4814 4814 vpage = (svd->vpage != NULL) ?
4815 4815 &svd->vpage[seg_page(seg, a)] : NULL;
4816 4816 } else {
4817 4817 /*
4818 4818 * Size down case. Note lpgaddr may only be needed for
4819 4819 * softlock case so we don't adjust it here.
4820 4820 */
4821 4821 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4822 4822 ASSERT(IS_P2ALIGNED(a, pgsz));
4823 4823 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4824 4824 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4825 4825 ASSERT(a < lpgeaddr);
4826 4826 if (a < addr) {
4827 4827 /*
4828 4828 * The beginning of the large page region can
4829 4829 * be pulled to the right to make a smaller
4830 4830 * region. We haven't yet faulted a single
4831 4831 * page.
4832 4832 */
4833 4833 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4834 4834 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4835 4835 ASSERT(a >= lpgaddr);
4836 4836 aindx = svd->anon_index + seg_page(seg, a);
4837 4837 vpage = (svd->vpage != NULL) ?
4838 4838 &svd->vpage[seg_page(seg, a)] : NULL;
4839 4839 }
4840 4840 }
4841 4841 }
4842 4842 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4843 4843 ANON_LOCK_EXIT(&->a_rwlock);
4844 4844 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4845 4845 return (0);
4846 4846 error:
4847 4847 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4848 4848 ANON_LOCK_EXIT(&->a_rwlock);
4849 4849 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4850 4850 if (type == F_SOFTLOCK && a > lpgaddr) {
4851 4851 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4852 4852 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4853 4853 }
4854 4854 return (err);
4855 4855 }
4856 4856
4857 4857 int fltadvice = 1; /* set to free behind pages for sequential access */
4858 4858
4859 4859 /*
4860 4860 * This routine is called via a machine specific fault handling routine.
4861 4861 * It is also called by software routines wishing to lock or unlock
4862 4862 * a range of addresses.
4863 4863 *
4864 4864 * Here is the basic algorithm:
4865 4865 * If unlocking
4866 4866 * Call segvn_softunlock
4867 4867 * Return
4868 4868 * endif
4869 4869 * Checking and set up work
4870 4870 * If we will need some non-anonymous pages
4871 4871 * Call VOP_GETPAGE over the range of non-anonymous pages
4872 4872 * endif
4873 4873 * Loop over all addresses requested
4874 4874 * Call segvn_faultpage passing in page list
4875 4875 * to load up translations and handle anonymous pages
4876 4876 * endloop
4877 4877 * Load up translation to any additional pages in page list not
4878 4878 * already handled that fit into this segment
4879 4879 */
4880 4880 static faultcode_t
4881 4881 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4882 4882 enum fault_type type, enum seg_rw rw)
4883 4883 {
4884 4884 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4885 4885 page_t **plp, **ppp, *pp;
4886 4886 u_offset_t off;
4887 4887 caddr_t a;
4888 4888 struct vpage *vpage;
4889 4889 uint_t vpprot, prot;
4890 4890 int err;
4891 4891 page_t *pl[PVN_GETPAGE_NUM + 1];
4892 4892 size_t plsz, pl_alloc_sz;
4893 4893 size_t page;
4894 4894 ulong_t anon_index;
4895 4895 struct anon_map *amp;
4896 4896 int dogetpage = 0;
4897 4897 caddr_t lpgaddr, lpgeaddr;
4898 4898 size_t pgsz;
4899 4899 anon_sync_obj_t cookie;
4900 4900 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4901 4901
4902 4902 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4903 4903 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4904 4904
4905 4905 /*
4906 4906 * First handle the easy stuff
4907 4907 */
4908 4908 if (type == F_SOFTUNLOCK) {
4909 4909 if (rw == S_READ_NOCOW) {
4910 4910 rw = S_READ;
4911 4911 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4912 4912 }
4913 4913 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4914 4914 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4915 4915 page_get_pagesize(seg->s_szc);
4916 4916 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4917 4917 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4918 4918 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4919 4919 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4920 4920 return (0);
4921 4921 }
4922 4922
4923 4923 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4924 4924 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4925 4925 if (brkcow == 0) {
4926 4926 if (svd->tr_state == SEGVN_TR_INIT) {
4927 4927 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4928 4928 if (svd->tr_state == SEGVN_TR_INIT) {
4929 4929 ASSERT(svd->vp != NULL && svd->amp == NULL);
4930 4930 ASSERT(svd->flags & MAP_TEXT);
4931 4931 ASSERT(svd->type == MAP_PRIVATE);
4932 4932 segvn_textrepl(seg);
4933 4933 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4934 4934 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4935 4935 svd->amp != NULL);
4936 4936 }
4937 4937 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4938 4938 }
4939 4939 } else if (svd->tr_state != SEGVN_TR_OFF) {
4940 4940 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4941 4941
4942 4942 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
4943 4943 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
4944 4944 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4945 4945 return (FC_PROT);
4946 4946 }
4947 4947
4948 4948 if (svd->tr_state == SEGVN_TR_ON) {
4949 4949 ASSERT(svd->vp != NULL && svd->amp != NULL);
4950 4950 segvn_textunrepl(seg, 0);
4951 4951 ASSERT(svd->amp == NULL &&
4952 4952 svd->tr_state == SEGVN_TR_OFF);
4953 4953 } else if (svd->tr_state != SEGVN_TR_OFF) {
4954 4954 svd->tr_state = SEGVN_TR_OFF;
4955 4955 }
4956 4956 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
4957 4957 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4958 4958 }
4959 4959
4960 4960 top:
4961 4961 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4962 4962
4963 4963 /*
4964 4964 * If we have the same protections for the entire segment,
4965 4965 * ensure that the access being attempted is legitimate.
4966 4966 */
4967 4967
4968 4968 if (svd->pageprot == 0) {
4969 4969 uint_t protchk;
4970 4970
4971 4971 switch (rw) {
4972 4972 case S_READ:
4973 4973 case S_READ_NOCOW:
4974 4974 protchk = PROT_READ;
4975 4975 break;
4976 4976 case S_WRITE:
4977 4977 protchk = PROT_WRITE;
4978 4978 break;
4979 4979 case S_EXEC:
4980 4980 protchk = PROT_EXEC;
4981 4981 break;
4982 4982 case S_OTHER:
4983 4983 default:
4984 4984 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4985 4985 break;
4986 4986 }
4987 4987
4988 4988 if ((svd->prot & protchk) == 0) {
4989 4989 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4990 4990 return (FC_PROT); /* illegal access type */
4991 4991 }
4992 4992 }
4993 4993
4994 4994 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
4995 4995 /* this must be SOFTLOCK S_READ fault */
4996 4996 ASSERT(svd->amp == NULL);
4997 4997 ASSERT(svd->tr_state == SEGVN_TR_OFF);
4998 4998 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4999 4999 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5000 5000 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5001 5001 /*
5002 5002 * this must be the first ever non S_READ_NOCOW
5003 5003 * softlock for this segment.
5004 5004 */
5005 5005 ASSERT(svd->softlockcnt == 0);
5006 5006 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5007 5007 HAT_REGION_TEXT);
5008 5008 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5009 5009 }
5010 5010 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5011 5011 goto top;
5012 5012 }
5013 5013
5014 5014 /*
5015 5015 * We can't allow the long term use of softlocks for vmpss segments,
5016 5016 * because in some file truncation cases we should be able to demote
5017 5017 * the segment, which requires that there are no softlocks. The
5018 5018 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5019 5019 * segment is S_READ_NOCOW, where the caller holds the address space
5020 5020 * locked as writer and calls softunlock before dropping the as lock.
5021 5021 * S_READ_NOCOW is used by /proc to read memory from another user.
5022 5022 *
5023 5023 * Another deadlock between SOFTLOCK and file truncation can happen
5024 5024 * because segvn_fault_vnodepages() calls the FS one pagesize at
5025 5025 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5026 5026 * can cause a deadlock because the first set of page_t's remain
5027 5027 * locked SE_SHARED. To avoid this, we demote segments on a first
5028 5028 * SOFTLOCK if they have a length greater than the segment's
5029 5029 * page size.
5030 5030 *
5031 5031 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5032 5032 * the access type is S_READ_NOCOW and the fault length is less than
5033 5033 * or equal to the segment's page size. While this is quite restrictive,
5034 5034 * it should be the most common case of SOFTLOCK against a vmpss
5035 5035 * segment.
5036 5036 *
5037 5037 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5038 5038 * caller makes sure no COW will be caused by another thread for a
5039 5039 * softlocked page.
5040 5040 */
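	/*
	 * Illustrative example (not from this file): on a vnode-backed
	 * segment using 4M pages, an F_SOFTLOCK with rw == S_READ_NOCOW
	 * whose rounded region stays inside a single 4M page is handled
	 * without demotion; any other access type, or a range that still
	 * spans more than one large page after CALC_LPG_REGION rounding,
	 * drops the segment back to PAGESIZE via segvn_clrszc() before
	 * the fault proceeds.
	 */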
5041 5041 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5042 5042 int demote = 0;
5043 5043
5044 5044 if (rw != S_READ_NOCOW) {
5045 5045 demote = 1;
5046 5046 }
5047 5047 if (!demote && len > PAGESIZE) {
5048 5048 pgsz = page_get_pagesize(seg->s_szc);
5049 5049 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5050 5050 lpgeaddr);
5051 5051 if (lpgeaddr - lpgaddr > pgsz) {
5052 5052 demote = 1;
5053 5053 }
5054 5054 }
5055 5055
5056 5056 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5057 5057
5058 5058 if (demote) {
5059 5059 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5060 5060 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5061 5061 if (seg->s_szc != 0) {
5062 5062 segvn_vmpss_clrszc_cnt++;
5063 5063 ASSERT(svd->softlockcnt == 0);
5064 5064 err = segvn_clrszc(seg);
5065 5065 if (err) {
5066 5066 segvn_vmpss_clrszc_err++;
5067 5067 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5068 5068 return (FC_MAKE_ERR(err));
5069 5069 }
5070 5070 }
5071 5071 ASSERT(seg->s_szc == 0);
5072 5072 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5073 5073 goto top;
5074 5074 }
5075 5075 }
5076 5076
5077 5077 /*
5078 5078 * Check to see if we need to allocate an anon_map structure.
5079 5079 */
5080 5080 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5081 5081 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5082 5082 /*
5083 5083 * Drop the "read" lock on the segment and acquire
5084 5084 * the "write" version since we have to allocate the
5085 5085 * anon_map.
5086 5086 */
5087 5087 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5088 5088 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5089 5089
5090 5090 if (svd->amp == NULL) {
5091 5091 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5092 5092 svd->amp->a_szc = seg->s_szc;
5093 5093 }
5094 5094 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5095 5095
5096 5096 /*
5097 5097 * Start all over again since segment protections
5098 5098 * may have changed after we dropped the "read" lock.
5099 5099 */
5100 5100 goto top;
5101 5101 }
5102 5102
5103 5103 /*
5104 5104 * S_READ_NOCOW vs S_READ distinction was
5105 5105 * only needed for the code above. After
5106 5106 * that we treat it as S_READ.
5107 5107 */
5108 5108 if (rw == S_READ_NOCOW) {
5109 5109 ASSERT(type == F_SOFTLOCK);
5110 5110 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5111 5111 rw = S_READ;
5112 5112 }
5113 5113
5114 5114 amp = svd->amp;
5115 5115
5116 5116 /*
5117 5117 * MADV_SEQUENTIAL work is ignored for large page segments.
5118 5118 */
5119 5119 if (seg->s_szc != 0) {
5120 5120 pgsz = page_get_pagesize(seg->s_szc);
5121 5121 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5122 5122 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5123 5123 if (svd->vp == NULL) {
5124 5124 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5125 5125 lpgeaddr, type, rw, addr, addr + len, brkcow);
5126 5126 } else {
5127 5127 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5128 5128 lpgeaddr, type, rw, addr, addr + len, brkcow);
5129 5129 if (err == IE_RETRY) {
5130 5130 ASSERT(seg->s_szc == 0);
5131 5131 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5132 5132 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5133 5133 goto top;
5134 5134 }
5135 5135 }
5136 5136 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5137 5137 return (err);
5138 5138 }
5139 5139
5140 5140 page = seg_page(seg, addr);
5141 5141 if (amp != NULL) {
5142 5142 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5143 5143 anon_index = svd->anon_index + page;
5144 5144
5145 5145 if (type == F_PROT && rw == S_READ &&
5146 5146 svd->tr_state == SEGVN_TR_OFF &&
5147 5147 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5148 5148 size_t index = anon_index;
5149 5149 struct anon *ap;
5150 5150
5151 5151 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5152 5152 /*
5153 5153 * The fast path could apply to S_WRITE also, except
5154 5154 * that the protection fault could be caused by lazy
5155 5155 * tlb flush when ro->rw. In this case, the pte is
5156 5156 * RW already. But RO in the other cpu's tlb causes
5157 5157 * the fault. Since hat_chgprot won't do anything if
5158 5158 * pte doesn't change, we may end up faulting
5159 5159 * indefinitely until the RO tlb entry gets replaced.
5160 5160 */
5161 5161 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5162 5162 anon_array_enter(amp, index, &cookie);
5163 5163 ap = anon_get_ptr(amp->ahp, index);
5164 5164 anon_array_exit(&cookie);
5165 5165 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5166 5166 ANON_LOCK_EXIT(&amp->a_rwlock);
5167 5167 goto slow;
5168 5168 }
5169 5169 }
5170 5170 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5171 5171 ANON_LOCK_EXIT(&amp->a_rwlock);
5172 5172 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5173 5173 return (0);
5174 5174 }
5175 5175 }
5176 5176 slow:
5177 5177
5178 5178 if (svd->vpage == NULL)
5179 5179 vpage = NULL;
5180 5180 else
5181 5181 vpage = &svd->vpage[page];
5182 5182
5183 5183 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5184 5184
5185 5185 /*
5186 5186 * If MADV_SEQUENTIAL has been set for the particular page we
5187 5187 * are faulting on, free behind all pages in the segment and put
5188 5188 * them on the free list.
5189 5189 */
5190 5190
5191 5191 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5192 5192 struct vpage *vpp;
5193 5193 ulong_t fanon_index;
5194 5194 size_t fpage;
5195 5195 u_offset_t pgoff, fpgoff;
5196 5196 struct vnode *fvp;
5197 5197 struct anon *fap = NULL;
5198 5198
5199 5199 if (svd->advice == MADV_SEQUENTIAL ||
5200 5200 (svd->pageadvice &&
5201 5201 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5202 5202 pgoff = off - PAGESIZE;
5203 5203 fpage = page - 1;
5204 5204 if (vpage != NULL)
5205 5205 vpp = &svd->vpage[fpage];
5206 5206 if (amp != NULL)
5207 5207 fanon_index = svd->anon_index + fpage;
5208 5208
5209 5209 while (pgoff > svd->offset) {
5210 5210 if (svd->advice != MADV_SEQUENTIAL &&
5211 5211 (!svd->pageadvice || (vpage &&
5212 5212 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5213 5213 break;
5214 5214
5215 5215 /*
5216 5216 * If this is an anon page, we must find the
5217 5217 * correct <vp, offset> for it
5218 5218 */
5219 5219 fap = NULL;
5220 5220 if (amp != NULL) {
5221 5221 ANON_LOCK_ENTER(&amp->a_rwlock,
5222 5222 RW_READER);
5223 5223 anon_array_enter(amp, fanon_index,
5224 5224 &cookie);
5225 5225 fap = anon_get_ptr(amp->ahp,
5226 5226 fanon_index);
5227 5227 if (fap != NULL) {
5228 5228 swap_xlate(fap, &fvp, &fpgoff);
5229 5229 } else {
5230 5230 fpgoff = pgoff;
5231 5231 fvp = svd->vp;
5232 5232 }
5233 5233 anon_array_exit(&cookie);
5234 5234 ANON_LOCK_EXIT(&amp->a_rwlock);
5235 5235 } else {
5236 5236 fpgoff = pgoff;
5237 5237 fvp = svd->vp;
5238 5238 }
5239 5239 if (fvp == NULL)
5240 5240 break; /* XXX */
5241 5241 /*
5242 5242 * Skip pages that are free or have an
5243 5243 * "exclusive" lock.
5244 5244 */
5245 5245 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5246 5246 if (pp == NULL)
5247 5247 break;
5248 5248 /*
5249 5249 * We don't need the page_struct_lock to test
5250 5250 * as this is only advisory; even if we
5251 5251 * acquire it someone might race in and lock
5252 5252 * the page after we unlock and before the
5253 5253 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5254 5254 */
5255 5255 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5256 5256 /*
5257 5257 * Hold the vnode before releasing
5258 5258 * the page lock to prevent it from
5259 5259 * being freed and re-used by some
5260 5260 * other thread.
5261 5261 */
5262 5262 VN_HOLD(fvp);
5263 5263 page_unlock(pp);
5264 5264 /*
5265 5265 * We should build a page list
5266 5266 * to kluster putpages XXX
5267 5267 */
5268 5268 (void) VOP_PUTPAGE(fvp,
5269 5269 (offset_t)fpgoff, PAGESIZE,
5270 5270 (B_DONTNEED|B_FREE|B_ASYNC),
5271 5271 svd->cred, NULL);
5272 5272 VN_RELE(fvp);
5273 5273 } else {
5274 5274 /*
5275 5275 * XXX - Should the loop terminate if
5276 5276 * the page is `locked'?
5277 5277 */
5278 5278 page_unlock(pp);
5279 5279 }
5280 5280 --vpp;
5281 5281 --fanon_index;
5282 5282 pgoff -= PAGESIZE;
5283 5283 }
5284 5284 }
5285 5285 }
5286 5286
5287 5287 plp = pl;
5288 5288 *plp = NULL;
5289 5289 pl_alloc_sz = 0;
5290 5290
5291 5291 /*
5292 5292 * See if we need to call VOP_GETPAGE for
5293 5293 * *any* of the range being faulted on.
5294 5294 * We can skip all of this work if there
5295 5295 * was no original vnode.
5296 5296 */
5297 5297 if (svd->vp != NULL) {
5298 5298 u_offset_t vp_off;
5299 5299 size_t vp_len;
5300 5300 struct anon *ap;
5301 5301 vnode_t *vp;
5302 5302
5303 5303 vp_off = off;
5304 5304 vp_len = len;
5305 5305
5306 5306 if (amp == NULL)
5307 5307 dogetpage = 1;
5308 5308 else {
5309 5309 /*
5310 5310 * Only acquire reader lock to prevent amp->ahp
5311 5311 * from being changed. It's ok to miss pages,
5312 5312 * hence we don't do anon_array_enter
5313 5313 */
5314 5314 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5315 5315 ap = anon_get_ptr(amp->ahp, anon_index);
5316 5316
5317 5317 if (len <= PAGESIZE)
5318 5318 /* inline non_anon() */
5319 5319 dogetpage = (ap == NULL);
5320 5320 else
5321 5321 dogetpage = non_anon(amp->ahp, anon_index,
5322 5322 &vp_off, &vp_len);
5323 5323 ANON_LOCK_EXIT(&amp->a_rwlock);
5324 5324 }
5325 5325
5326 5326 if (dogetpage) {
5327 5327 enum seg_rw arw;
5328 5328 struct as *as = seg->s_as;
5329 5329
5330 5330 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5331 5331 /*
5332 5332 * Page list won't fit in local array,
5333 5333 * allocate one of the needed size.
5334 5334 */
5335 5335 pl_alloc_sz =
5336 5336 (btop(len) + 1) * sizeof (page_t *);
5337 5337 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5338 5338 plp[0] = NULL;
5339 5339 plsz = len;
5340 5340 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5341 5341 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5342 5342 (((size_t)(addr + PAGESIZE) <
5343 5343 (size_t)(seg->s_base + seg->s_size)) &&
5344 5344 hat_probe(as->a_hat, addr + PAGESIZE))) {
5345 5345 /*
5346 5346 * Ask VOP_GETPAGE to return the exact number
5347 5347 * of pages if
5348 5348 * (a) this is a COW fault, or
5349 5349 * (b) this is a software fault, or
5350 5350 * (c) next page is already mapped.
5351 5351 */
5352 5352 plsz = len;
5353 5353 } else {
5354 5354 /*
5355 5355 * Ask VOP_GETPAGE to return adjacent pages
5356 5356 * within the segment.
5357 5357 */
5358 5358 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5359 5359 ((seg->s_base + seg->s_size) - addr));
5360 5360 ASSERT((addr + plsz) <=
5361 5361 (seg->s_base + seg->s_size));
5362 5362 }
5363 5363
5364 5364 /*
5365 5365 * Need to get some non-anonymous pages.
5366 5366 * We need to make only one call to GETPAGE to do
5367 5367 * this to prevent certain deadlocking conditions
5368 5368 * when we are doing locking. In this case
5369 5369 * non_anon() should have picked up the smallest
5370 5370 * range which includes all the non-anonymous
5371 5371 * pages in the requested range. We have to
5372 5372 * be careful regarding which rw flag to pass in
5373 5373 * because on a private mapping, the underlying
5374 5374 * object is never allowed to be written.
5375 5375 */
5376 5376 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5377 5377 arw = S_READ;
5378 5378 } else {
5379 5379 arw = rw;
5380 5380 }
5381 5381 vp = svd->vp;
5382 5382 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5383 5383 "segvn_getpage:seg %p addr %p vp %p",
5384 5384 seg, addr, vp);
5385 5385 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5386 5386 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5387 5387 svd->cred, NULL);
5388 5388 if (err) {
5389 5389 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5390 5390 segvn_pagelist_rele(plp);
5391 5391 if (pl_alloc_sz)
5392 5392 kmem_free(plp, pl_alloc_sz);
5393 5393 return (FC_MAKE_ERR(err));
5394 5394 }
5395 5395 if (svd->type == MAP_PRIVATE)
5396 5396 vpprot &= ~PROT_WRITE;
5397 5397 }
5398 5398 }
5399 5399
5400 5400 /*
5401 5401 * N.B. at this time the plp array has all the needed non-anon
5402 5402 * pages in addition to (possibly) having some adjacent pages.
5403 5403 */
5404 5404
5405 5405 /*
5406 5406 * Always acquire the anon_array_lock to prevent
5407 5407 * 2 threads from allocating separate anon slots for
5408 5408 * the same "addr".
5409 5409 *
5410 5410 * If this is a copy-on-write fault and we don't already
5411 5411 * have the anon_array_lock, acquire it to prevent the
5412 5412 * fault routine from handling multiple copy-on-write faults
5413 5413 * on the same "addr" in the same address space.
5414 5414 *
5415 5415 * Only one thread should deal with the fault since after
5416 5416 * it is handled, the other threads can acquire a translation
5417 5417 * to the newly created private page. This prevents two or
5418 5418 * more threads from creating different private pages for the
5419 5419 * same fault.
5420 5420 *
5421 5421 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5422 5422 * to prevent deadlock between this thread and another thread
5423 5423 * which has soft-locked this page and wants to acquire serial_lock.
5424 5424 * ( bug 4026339 )
5425 5425 *
5426 5426 * The fix for bug 4026339 becomes unnecessary when using the
5427 5427 * locking scheme with per amp rwlock and a global set of hash
5428 5428 * lock, anon_array_lock. If we steal a vnode page when low
5429 5429 * on memory and upgrade the page lock through page_rename,
5430 5430 * then the page is PAGE_HANDLED, nothing needs to be done
5431 5431 * for this page after returning from segvn_faultpage.
5432 5432 *
5433 5433 * But really, the page lock should be downgraded after
5434 5434 * the stolen page is page_rename'd.
5435 5435 */
5436 5436
5437 5437 if (amp != NULL)
5438 5438 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5439 5439
5440 5440 /*
5441 5441 * Ok, now loop over the address range and handle faults
5442 5442 */
5443 5443 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5444 5444 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5445 5445 type, rw, brkcow);
5446 5446 if (err) {
5447 5447 if (amp != NULL)
5448 5448 ANON_LOCK_EXIT(&amp->a_rwlock);
5449 5449 if (type == F_SOFTLOCK && a > addr) {
5450 5450 segvn_softunlock(seg, addr, (a - addr),
5451 5451 S_OTHER);
5452 5452 }
5453 5453 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5454 5454 segvn_pagelist_rele(plp);
5455 5455 if (pl_alloc_sz)
5456 5456 kmem_free(plp, pl_alloc_sz);
5457 5457 return (err);
5458 5458 }
5459 5459 if (vpage) {
5460 5460 vpage++;
5461 5461 } else if (svd->vpage) {
5462 5462 page = seg_page(seg, addr);
5463 5463 vpage = &svd->vpage[++page];
5464 5464 }
5465 5465 }
5466 5466
5467 5467 /* Didn't get pages from the underlying fs so we're done */
5468 5468 if (!dogetpage)
5469 5469 goto done;
5470 5470
5471 5471 /*
5472 5472 * Now handle any other pages in the list returned.
5473 5473 * If the page can be used, load up the translations now.
5474 5474 * Note that the for loop will only be entered if "plp"
5475 5475 * is pointing to a non-NULL page pointer which means that
5476 5476 * VOP_GETPAGE() was called and vpprot has been initialized.
5477 5477 */
5478 5478 if (svd->pageprot == 0)
5479 5479 prot = svd->prot & vpprot;
5480 5480
5481 5481
5482 5482 /*
5483 5483 * Large Files: diff should be unsigned value because we started
5484 5484 * supporting > 2GB segment sizes from 2.5.1 and when a
5485 5485 * large file of size > 2GB gets mapped to address space
5486 5486 * the diff value can be > 2GB.
5487 5487 */
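	/*
	 * Worked example (illustrative numbers): with svd->offset == 0 and
	 * a page returned at p_offset == 0xA0000000 (2.5GB into the file),
	 * diff is 0xA0000000.  Held in a signed 32-bit type that value
	 * would be negative and btop(diff) below would compute the wrong
	 * vpage index, which is why diff is declared size_t.
	 */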
5488 5488
5489 5489 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5490 5490 size_t diff;
5491 5491 struct anon *ap;
5492 5492 int anon_index;
5493 5493 anon_sync_obj_t cookie;
5494 5494 int hat_flag = HAT_LOAD_ADV;
5495 5495
5496 5496 if (svd->flags & MAP_TEXT) {
5497 5497 hat_flag |= HAT_LOAD_TEXT;
5498 5498 }
5499 5499
5500 5500 if (pp == PAGE_HANDLED)
5501 5501 continue;
5502 5502
5503 5503 if (svd->tr_state != SEGVN_TR_ON &&
5504 5504 pp->p_offset >= svd->offset &&
5505 5505 pp->p_offset < svd->offset + seg->s_size) {
5506 5506
5507 5507 diff = pp->p_offset - svd->offset;
5508 5508
5509 5509 /*
5510 5510 * Large Files: Following is the assertion
5511 5511 * validating the above cast.
5512 5512 */
5513 5513 ASSERT(svd->vp == pp->p_vnode);
5514 5514
5515 5515 page = btop(diff);
5516 5516 if (svd->pageprot)
5517 5517 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5518 5518
5519 5519 /*
5520 5520 * Prevent other threads in the address space from
5521 5521 * creating private pages (i.e., allocating anon slots)
5522 5522 * while we are in the process of loading translations
5523 5523 * to additional pages returned by the underlying
5524 5524 * object.
5525 5525 */
5526 5526 if (amp != NULL) {
5527 5527 anon_index = svd->anon_index + page;
5528 5528 anon_array_enter(amp, anon_index, &cookie);
5529 5529 ap = anon_get_ptr(amp->ahp, anon_index);
5530 5530 }
5531 5531 if ((amp == NULL) || (ap == NULL)) {
5532 5532 if (IS_VMODSORT(pp->p_vnode) ||
5533 5533 enable_mbit_wa) {
5534 5534 if (rw == S_WRITE)
5535 5535 hat_setmod(pp);
5536 5536 else if (rw != S_OTHER &&
5537 5537 !hat_ismod(pp))
5538 5538 prot &= ~PROT_WRITE;
5539 5539 }
5540 5540 /*
5541 5541 * Skip mapping read ahead pages marked
5542 5542 * for migration, so they will get migrated
5543 5543 * properly on fault
5544 5544 */
5545 5545 ASSERT(amp == NULL ||
5546 5546 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5547 5547 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5548 5548 hat_memload_region(hat,
5549 5549 seg->s_base + diff,
5550 5550 pp, prot, hat_flag,
5551 5551 svd->rcookie);
5552 5552 }
5553 5553 }
5554 5554 if (amp != NULL)
5555 5555 anon_array_exit(&cookie);
5556 5556 }
5557 5557 page_unlock(pp);
5558 5558 }
5559 5559 done:
5560 5560 if (amp != NULL)
5561 5561 ANON_LOCK_EXIT(&amp->a_rwlock);
5562 5562 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5563 5563 if (pl_alloc_sz)
5564 5564 kmem_free(plp, pl_alloc_sz);
5565 5565 return (0);
5566 5566 }
5567 5567
5568 5568 /*
5569 5569 * This routine is used to start I/O on pages asynchronously. XXX it will
5570 5570 * only create PAGESIZE pages. At fault time they will be relocated into
5571 5571 * larger pages.
5572 5572 */
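/*
 * Illustrative sketch (assumed caller, not code from this file): advisory
 * read-ahead such as madvise(MADV_WILLNEED) typically arrives here through
 * as_faulta(), which walks the range roughly a page at a time:
 *
 *	for (a = addr; a < addr + len; a += PAGESIZE)
 *		(void) SEGOP_FAULTA(seg, a);
 *
 * Each call only starts the read; the page is mapped later by
 * segvn_fault() when it is actually referenced.
 */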
5573 5573 static faultcode_t
5574 5574 segvn_faulta(struct seg *seg, caddr_t addr)
5575 5575 {
5576 5576 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5577 5577 int err;
5578 5578 struct anon_map *amp;
5579 5579 vnode_t *vp;
5580 5580
5581 5581 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5582 5582
5583 5583 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5584 5584 if ((amp = svd->amp) != NULL) {
5585 5585 struct anon *ap;
5586 5586
5587 5587 /*
5588 5588 * Reader lock to prevent amp->ahp from being changed.
5589 5589 * This is advisory, it's ok to miss a page, so
5590 5590 * we don't do anon_array_enter lock.
5591 5591 */
5592 5592 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5593 5593 if ((ap = anon_get_ptr(amp->ahp,
5594 5594 svd->anon_index + seg_page(seg, addr))) != NULL) {
5595 5595
5596 5596 err = anon_getpage(&ap, NULL, NULL,
5597 5597 0, seg, addr, S_READ, svd->cred);
5598 5598
5599 5599 ANON_LOCK_EXIT(&amp->a_rwlock);
5600 5600 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5601 5601 if (err)
5602 5602 return (FC_MAKE_ERR(err));
5603 5603 return (0);
5604 5604 }
5605 5605 ANON_LOCK_EXIT(&amp->a_rwlock);
5606 5606 }
5607 5607
5608 5608 if (svd->vp == NULL) {
5609 5609 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5610 5610 return (0); /* zfod page - do nothing now */
5611 5611 }
5612 5612
5613 5613 vp = svd->vp;
5614 5614 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5615 5615 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5616 5616 err = VOP_GETPAGE(vp,
5617 5617 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5618 5618 PAGESIZE, NULL, NULL, 0, seg, addr,
5619 5619 S_OTHER, svd->cred, NULL);
5620 5620
5621 5621 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5622 5622 if (err)
5623 5623 return (FC_MAKE_ERR(err));
5624 5624 return (0);
5625 5625 }
5626 5626
5627 5627 static int
5628 5628 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5629 5629 {
5630 5630 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5631 5631 struct vpage *cvp, *svp, *evp;
5632 5632 struct vnode *vp;
5633 5633 size_t pgsz;
5634 5634 pgcnt_t pgcnt;
5635 5635 anon_sync_obj_t cookie;
5636 5636 int unload_done = 0;
5637 5637
5638 5638 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5639 5639
5640 5640 if ((svd->maxprot & prot) != prot)
5641 5641 return (EACCES); /* violated maxprot */
5642 5642
5643 5643 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5644 5644
5645 5645 /* return if prot is the same */
5646 5646 if (!svd->pageprot && svd->prot == prot) {
5647 5647 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5648 5648 return (0);
5649 5649 }
5650 5650
5651 5651 /*
5652 5652 * Since we change protections we first have to flush the cache.
5653 5653 * This makes sure all the pagelock calls have to recheck
5654 5654 * protections.
5655 5655 */
5656 5656 if (svd->softlockcnt > 0) {
5657 5657 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5658 5658
5659 5659 /*
5660 5660 * If this is a shared segment, a non-zero softlockcnt
5661 5661 * means locked pages are still in use.
5662 5662 */
5663 5663 if (svd->type == MAP_SHARED) {
5664 5664 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5665 5665 return (EAGAIN);
5666 5666 }
5667 5667
5668 5668 /*
5669 5669 * Since we do have the segvn writers lock nobody can fill
5670 5670 * the cache with entries belonging to this seg during
5671 5671 * the purge. The flush either succeeds or we still have
5672 5672 * pending I/Os.
5673 5673 */
5674 5674 segvn_purge(seg);
5675 5675 if (svd->softlockcnt > 0) {
5676 5676 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5677 5677 return (EAGAIN);
5678 5678 }
5679 5679 }
5680 5680
5681 5681 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5682 5682 ASSERT(svd->amp == NULL);
5683 5683 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5684 5684 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5685 5685 HAT_REGION_TEXT);
5686 5686 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5687 5687 unload_done = 1;
5688 5688 } else if (svd->tr_state == SEGVN_TR_INIT) {
5689 5689 svd->tr_state = SEGVN_TR_OFF;
5690 5690 } else if (svd->tr_state == SEGVN_TR_ON) {
5691 5691 ASSERT(svd->amp != NULL);
5692 5692 segvn_textunrepl(seg, 0);
5693 5693 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5694 5694 unload_done = 1;
5695 5695 }
5696 5696
5697 5697 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5698 5698 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5699 5699 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5700 5700 segvn_inval_trcache(svd->vp);
5701 5701 }
5702 5702 if (seg->s_szc != 0) {
5703 5703 int err;
5704 5704 pgsz = page_get_pagesize(seg->s_szc);
5705 5705 pgcnt = pgsz >> PAGESHIFT;
5706 5706 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5707 5707 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5708 5708 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5709 5709 ASSERT(seg->s_base != addr || seg->s_size != len);
5710 5710 /*
5711 5711 * If we are holding the as lock as a reader then
5712 5712 * we need to return IE_RETRY and let the as
5713 5713 * layer drop and re-acquire the lock as a writer.
5714 5714 */
5715 5715 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5716 5716 return (IE_RETRY);
5717 5717 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5718 5718 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5719 5719 err = segvn_demote_range(seg, addr, len,
5720 5720 SDR_END, 0);
5721 5721 } else {
5722 5722 uint_t szcvec = map_pgszcvec(seg->s_base,
5723 5723 pgsz, (uintptr_t)seg->s_base,
5724 5724 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5725 5725 err = segvn_demote_range(seg, addr, len,
5726 5726 SDR_END, szcvec);
5727 5727 }
5728 5728 if (err == 0)
5729 5729 return (IE_RETRY);
5730 5730 if (err == ENOMEM)
5731 5731 return (IE_NOMEM);
5732 5732 return (err);
5733 5733 }
5734 5734 }
5735 5735
5736 5736
5737 5737 /*
5738 5738 * If it's a private mapping and we're making it writable then we
5739 5739 * may have to reserve the additional swap space now. If we are
5740 5740 * making writable only a part of the segment then we use its vpage
5741 5741 * array to keep a record of the pages for which we have reserved
5742 5742 * swap. In this case we set the pageswap field in the segment's
5743 5743 * segvn structure to record this.
5744 5744 *
5745 5745 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5746 5746 * removing write permission on the entire segment and we haven't
5747 5747 * modified any pages, we can release the swap space.
5748 5748 */
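	/*
	 * Worked example (illustrative sizes): a 1M MAP_PRIVATE file
	 * mapping made writable 64K at a time reserves 64K of swap on the
	 * first call, sets svd->pageswap and marks those vpages with
	 * VPP_SETSWAPRES; a later call over 128K that overlaps the first
	 * 64K reserves only the 64K not already marked.  Only a
	 * whole-segment request with pageswap still 0 takes the simple
	 * svd->swresv = seg->s_size path.
	 */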
5749 5749 if (svd->type == MAP_PRIVATE) {
5750 5750 if (prot & PROT_WRITE) {
5751 5751 if (!(svd->flags & MAP_NORESERVE) &&
5752 5752 !(svd->swresv && svd->pageswap == 0)) {
5753 5753 size_t sz = 0;
5754 5754
5755 5755 /*
5756 5756 * Start by determining how much swap
5757 5757 * space is required.
5758 5758 */
5759 5759 if (addr == seg->s_base &&
5760 5760 len == seg->s_size &&
5761 5761 svd->pageswap == 0) {
5762 5762 /* The whole segment */
5763 5763 sz = seg->s_size;
5764 5764 } else {
5765 5765 /*
5766 5766 * Make sure that the vpage array
5767 5767 * exists, and make a note of the
5768 5768 * range of elements corresponding
5769 5769 * to len.
5770 5770 */
5771 5771 segvn_vpage(seg);
5772 5772 svp = &svd->vpage[seg_page(seg, addr)];
5773 5773 evp = &svd->vpage[seg_page(seg,
5774 5774 addr + len)];
5775 5775
5776 5776 if (svd->pageswap == 0) {
5777 5777 /*
5778 5778 * This is the first time we've
5779 5779 * asked for a part of this
5780 5780 * segment, so we need to
5781 5781 * reserve everything we've
5782 5782 * been asked for.
5783 5783 */
5784 5784 sz = len;
5785 5785 } else {
5786 5786 /*
5787 5787 * We have to count the number
5788 5788 * of pages required.
5789 5789 */
5790 5790 for (cvp = svp; cvp < evp;
5791 5791 cvp++) {
5792 5792 if (!VPP_ISSWAPRES(cvp))
5793 5793 sz++;
5794 5794 }
5795 5795 sz <<= PAGESHIFT;
5796 5796 }
5797 5797 }
5798 5798
5799 5799 /* Try to reserve the necessary swap. */
5800 5800 if (anon_resv_zone(sz,
5801 5801 seg->s_as->a_proc->p_zone) == 0) {
5802 5802 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5803 5803 return (IE_NOMEM);
5804 5804 }
5805 5805
5806 5806 /*
5807 5807 * Make a note of how much swap space
5808 5808 * we've reserved.
5809 5809 */
5810 5810 if (svd->pageswap == 0 && sz == seg->s_size) {
5811 5811 svd->swresv = sz;
5812 5812 } else {
5813 5813 ASSERT(svd->vpage != NULL);
5814 5814 svd->swresv += sz;
5815 5815 svd->pageswap = 1;
5816 5816 for (cvp = svp; cvp < evp; cvp++) {
5817 5817 if (!VPP_ISSWAPRES(cvp))
5818 5818 VPP_SETSWAPRES(cvp);
5819 5819 }
5820 5820 }
5821 5821 }
5822 5822 } else {
5823 5823 /*
5824 5824 * Swap space is released only if this segment
5825 5825 * does not map anonymous memory, since read faults
5826 5826 * on such segments still need an anon slot to read
5827 5827 * in the data.
5828 5828 */
5829 5829 if (svd->swresv != 0 && svd->vp != NULL &&
5830 5830 svd->amp == NULL && addr == seg->s_base &&
5831 5831 len == seg->s_size && svd->pageprot == 0) {
5832 5832 ASSERT(svd->pageswap == 0);
5833 5833 anon_unresv_zone(svd->swresv,
5834 5834 seg->s_as->a_proc->p_zone);
5835 5835 svd->swresv = 0;
5836 5836 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5837 5837 "anon proc:%p %lu %u", seg, 0, 0);
5838 5838 }
5839 5839 }
5840 5840 }
5841 5841
5842 5842 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5843 5843 if (svd->prot == prot) {
5844 5844 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5845 5845 return (0); /* all done */
5846 5846 }
5847 5847 svd->prot = (uchar_t)prot;
5848 5848 } else if (svd->type == MAP_PRIVATE) {
5849 5849 struct anon *ap = NULL;
5850 5850 page_t *pp;
5851 5851 u_offset_t offset, off;
5852 5852 struct anon_map *amp;
5853 5853 ulong_t anon_idx = 0;
5854 5854
5855 5855 /*
5856 5856 * A vpage structure exists or else the change does not
5857 5857 * involve the entire segment. Establish a vpage structure
5858 5858 * if none is there. Then, for each page in the range,
5859 5859 * adjust its individual permissions. Note that write-
5860 5860 * enabling a MAP_PRIVATE page can affect the claims for
5861 5861 * locked down memory. Overcommitting memory terminates
5862 5862 * the operation.
5863 5863 */
5864 5864 segvn_vpage(seg);
5865 5865 svd->pageprot = 1;
5866 5866 if ((amp = svd->amp) != NULL) {
5867 5867 anon_idx = svd->anon_index + seg_page(seg, addr);
5868 5868 ASSERT(seg->s_szc == 0 ||
5869 5869 IS_P2ALIGNED(anon_idx, pgcnt));
5870 5870 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5871 5871 }
5872 5872
5873 5873 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5874 5874 evp = &svd->vpage[seg_page(seg, addr + len)];
5875 5875
5876 5876 /*
5877 5877 * See Statement at the beginning of segvn_lockop regarding
5878 5878 * the way cowcnts and lckcnts are handled.
5879 5879 */
5880 5880 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5881 5881
5882 5882 if (seg->s_szc != 0) {
5883 5883 if (amp != NULL) {
5884 5884 anon_array_enter(amp, anon_idx,
5885 5885 &cookie);
5886 5886 }
5887 5887 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5888 5888 !segvn_claim_pages(seg, svp, offset,
5889 5889 anon_idx, prot)) {
5890 5890 if (amp != NULL) {
5891 5891 anon_array_exit(&cookie);
5892 5892 }
5893 5893 break;
5894 5894 }
5895 5895 if (amp != NULL) {
5896 5896 anon_array_exit(&cookie);
5897 5897 }
5898 5898 anon_idx++;
5899 5899 } else {
5900 5900 if (amp != NULL) {
5901 5901 anon_array_enter(amp, anon_idx,
5902 5902 &cookie);
5903 5903 ap = anon_get_ptr(amp->ahp, anon_idx++);
5904 5904 }
5905 5905
5906 5906 if (VPP_ISPPLOCK(svp) &&
5907 5907 VPP_PROT(svp) != prot) {
5908 5908
5909 5909 if (amp == NULL || ap == NULL) {
5910 5910 vp = svd->vp;
5911 5911 off = offset;
5912 5912 } else
5913 5913 swap_xlate(ap, &vp, &off);
5914 5914 if (amp != NULL)
5915 5915 anon_array_exit(&cookie);
5916 5916
5917 5917 if ((pp = page_lookup(vp, off,
5918 5918 SE_SHARED)) == NULL) {
5919 5919 panic("segvn_setprot: no page");
5920 5920 /*NOTREACHED*/
5921 5921 }
5922 5922 ASSERT(seg->s_szc == 0);
5923 5923 if ((VPP_PROT(svp) ^ prot) &
5924 5924 PROT_WRITE) {
5925 5925 if (prot & PROT_WRITE) {
5926 5926 if (!page_addclaim(
5927 5927 pp)) {
5928 5928 page_unlock(pp);
5929 5929 break;
5930 5930 }
5931 5931 } else {
5932 5932 if (!page_subclaim(
5933 5933 pp)) {
5934 5934 page_unlock(pp);
5935 5935 break;
5936 5936 }
5937 5937 }
5938 5938 }
5939 5939 page_unlock(pp);
5940 5940 } else if (amp != NULL)
5941 5941 anon_array_exit(&cookie);
5942 5942 }
5943 5943 VPP_SETPROT(svp, prot);
5944 5944 offset += PAGESIZE;
5945 5945 }
5946 5946 if (amp != NULL)
5947 5947 ANON_LOCK_EXIT(&amp->a_rwlock);
5948 5948
5949 5949 /*
5950 5950 * Did we terminate prematurely? If so, simply unload
5951 5951 * the translations to the things we've updated so far.
5952 5952 */
5953 5953 if (svp != evp) {
5954 5954 if (unload_done) {
5955 5955 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5956 5956 return (IE_NOMEM);
5957 5957 }
5958 5958 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
5959 5959 PAGESIZE;
5960 5960 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
5961 5961 if (len != 0)
5962 5962 hat_unload(seg->s_as->a_hat, addr,
5963 5963 len, HAT_UNLOAD);
5964 5964 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5965 5965 return (IE_NOMEM);
5966 5966 }
5967 5967 } else {
5968 5968 segvn_vpage(seg);
5969 5969 svd->pageprot = 1;
5970 5970 evp = &svd->vpage[seg_page(seg, addr + len)];
5971 5971 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5972 5972 VPP_SETPROT(svp, prot);
5973 5973 }
5974 5974 }
5975 5975
5976 5976 if (unload_done) {
5977 5977 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5978 5978 return (0);
5979 5979 }
5980 5980
5981 5981 if (((prot & PROT_WRITE) != 0 &&
5982 5982 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
5983 5983 (prot & ~PROT_USER) == PROT_NONE) {
5984 5984 /*
5985 5985 * Either private or shared data with write access (in
5986 5986 * which case we need to throw out all former translations
5987 5987 * so that we get the right translations set up on fault
5988 5988 * and we don't allow write access to any copy-on-write pages
5989 5989 * that might be around or to prevent write access to pages
5990 5990 * representing holes in a file), or we don't have permission
5991 5991 * to access the memory at all (in which case we have to
5992 5992 * unload any current translations that might exist).
5993 5993 */
5994 5994 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
5995 5995 } else {
5996 5996 /*
5997 5997 * A shared mapping or a private mapping in which write
5998 5998 * protection is going to be denied - just change all the
5999 5999 * protections over the range of addresses in question.
6000 6000 * segvn does not support any other attributes other
6001 6001 * than prot so we can use hat_chgattr.
6002 6002 */
6003 6003 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6004 6004 }
6005 6005
6006 6006 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6007 6007
6008 6008 return (0);
6009 6009 }
6010 6010
6011 6011 /*
6012 6012 * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize,
6013 6013 * to determine if the seg is capable of mapping the requested szc.
6014 6014 */
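/*
 * Illustrative sketch (assumed caller, not code from this file): large page
 * requests typically originate from memcntl(MC_HAT_ADVISE) with a struct
 * memcntl_mha naming the desired page size; as_setpagesize() converts that
 * to a szc and hands it to each affected segment, roughly:
 *
 *	szc = page_szc(mha.mha_pagesize);
 *	error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
 *
 * IE_RETRY returned below tells the as layer to re-drive the operation
 * after the segment has been split, demoted or concatenated.
 */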
6015 6015 static int
6016 6016 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6017 6017 {
6018 6018 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6019 6019 struct segvn_data *nsvd;
6020 6020 struct anon_map *amp = svd->amp;
6021 6021 struct seg *nseg;
6022 6022 caddr_t eaddr = addr + len, a;
6023 6023 size_t pgsz = page_get_pagesize(szc);
6024 6024 pgcnt_t pgcnt = page_get_pagecnt(szc);
6025 6025 int err;
6026 6026 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6027 6027
6028 6028 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6029 6029 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6030 6030
6031 6031 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6032 6032 return (0);
6033 6033 }
6034 6034
6035 6035 /*
6036 6036 * addr should always be pgsz aligned but eaddr may be misaligned if
6037 6037 * it's at the end of the segment.
6038 6038 *
6039 6039 * XXX we should assert this condition since as_setpagesize() logic
6040 6040 * guarantees it.
6041 6041 */
6042 6042 if (!IS_P2ALIGNED(addr, pgsz) ||
6043 6043 (!IS_P2ALIGNED(eaddr, pgsz) &&
6044 6044 eaddr != seg->s_base + seg->s_size)) {
6045 6045
6046 6046 segvn_setpgsz_align_err++;
6047 6047 return (EINVAL);
6048 6048 }
6049 6049
6050 6050 if (amp != NULL && svd->type == MAP_SHARED) {
6051 6051 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6052 6052 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6053 6053
6054 6054 segvn_setpgsz_anon_align_err++;
6055 6055 return (EINVAL);
6056 6056 }
6057 6057 }
6058 6058
6059 6059 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6060 6060 szc > segvn_maxpgszc) {
6061 6061 return (EINVAL);
6062 6062 }
6063 6063
6064 6064 /* paranoid check */
6065 6065 if (svd->vp != NULL &&
6066 6066 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6067 6067 return (EINVAL);
6068 6068 }
6069 6069
6070 6070 if (seg->s_szc == 0 && svd->vp != NULL &&
6071 6071 map_addr_vacalign_check(addr, off)) {
6072 6072 return (EINVAL);
6073 6073 }
6074 6074
6075 6075 /*
6076 6076 * Check that protections are the same within new page
6077 6077 * size boundaries.
6078 6078 */
6079 6079 if (svd->pageprot) {
6080 6080 for (a = addr; a < eaddr; a += pgsz) {
6081 6081 if ((a + pgsz) > eaddr) {
6082 6082 if (!sameprot(seg, a, eaddr - a)) {
6083 6083 return (EINVAL);
6084 6084 }
6085 6085 } else {
6086 6086 if (!sameprot(seg, a, pgsz)) {
6087 6087 return (EINVAL);
6088 6088 }
6089 6089 }
6090 6090 }
6091 6091 }
6092 6092
6093 6093 /*
6094 6094 * Since we are changing page size we first have to flush
6095 6095 * the cache. This makes sure all the pagelock calls have
6096 6096 * to recheck protections.
6097 6097 */
6098 6098 if (svd->softlockcnt > 0) {
6099 6099 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6100 6100
6101 6101 /*
6102 6102 * If this is a shared segment, a non-zero softlockcnt
6103 6103 * means locked pages are still in use.
6104 6104 */
6105 6105 if (svd->type == MAP_SHARED) {
6106 6106 return (EAGAIN);
6107 6107 }
6108 6108
6109 6109 /*
6110 6110 * Since we do have the segvn writers lock nobody can fill
6111 6111 * the cache with entries belonging to this seg during
6112 6112 * the purge. The flush either succeeds or we still have
6113 6113 * pending I/Os.
6114 6114 */
6115 6115 segvn_purge(seg);
6116 6116 if (svd->softlockcnt > 0) {
6117 6117 return (EAGAIN);
6118 6118 }
6119 6119 }
6120 6120
6121 6121 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6122 6122 ASSERT(svd->amp == NULL);
6123 6123 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6124 6124 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6125 6125 HAT_REGION_TEXT);
6126 6126 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6127 6127 } else if (svd->tr_state == SEGVN_TR_INIT) {
6128 6128 svd->tr_state = SEGVN_TR_OFF;
6129 6129 } else if (svd->tr_state == SEGVN_TR_ON) {
6130 6130 ASSERT(svd->amp != NULL);
6131 6131 segvn_textunrepl(seg, 1);
6132 6132 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6133 6133 amp = NULL;
6134 6134 }
6135 6135
6136 6136 /*
6137 6137 * Operation for sub range of existing segment.
6138 6138 */
6139 6139 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6140 6140 if (szc < seg->s_szc) {
6141 6141 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6142 6142 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6143 6143 if (err == 0) {
6144 6144 return (IE_RETRY);
6145 6145 }
6146 6146 if (err == ENOMEM) {
6147 6147 return (IE_NOMEM);
6148 6148 }
6149 6149 return (err);
6150 6150 }
6151 6151 if (addr != seg->s_base) {
6152 6152 nseg = segvn_split_seg(seg, addr);
6153 6153 if (eaddr != (nseg->s_base + nseg->s_size)) {
6154 6154 /* eaddr is szc aligned */
6155 6155 (void) segvn_split_seg(nseg, eaddr);
6156 6156 }
6157 6157 return (IE_RETRY);
6158 6158 }
6159 6159 if (eaddr != (seg->s_base + seg->s_size)) {
6160 6160 /* eaddr is szc aligned */
6161 6161 (void) segvn_split_seg(seg, eaddr);
6162 6162 }
6163 6163 return (IE_RETRY);
6164 6164 }
6165 6165
6166 6166 /*
6167 6167 * Break any low level sharing and reset seg->s_szc to 0.
6168 6168 */
6169 6169 if ((err = segvn_clrszc(seg)) != 0) {
6170 6170 if (err == ENOMEM) {
6171 6171 err = IE_NOMEM;
6172 6172 }
6173 6173 return (err);
6174 6174 }
6175 6175 ASSERT(seg->s_szc == 0);
6176 6176
6177 6177 /*
6178 6178 * If the end of the current segment is not pgsz aligned
6179 6179 * then attempt to concatenate with the next segment.
6180 6180 */
6181 6181 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6182 6182 nseg = AS_SEGNEXT(seg->s_as, seg);
6183 6183 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6184 6184 return (ENOMEM);
6185 6185 }
6186 6186 if (nseg->s_ops != &segvn_ops) {
6187 6187 return (EINVAL);
6188 6188 }
6189 6189 nsvd = (struct segvn_data *)nseg->s_data;
6190 6190 if (nsvd->softlockcnt > 0) {
6191 6191 /*
6192 6192 * If this is a shared segment, a non-zero softlockcnt
6193 6193 * means locked pages are still in use.
6194 6194 */
6195 6195 if (nsvd->type == MAP_SHARED) {
6196 6196 return (EAGAIN);
6197 6197 }
6198 6198 segvn_purge(nseg);
6199 6199 if (nsvd->softlockcnt > 0) {
6200 6200 return (EAGAIN);
6201 6201 }
6202 6202 }
6203 6203 err = segvn_clrszc(nseg);
6204 6204 if (err == ENOMEM) {
6205 6205 err = IE_NOMEM;
6206 6206 }
6207 6207 if (err != 0) {
6208 6208 return (err);
6209 6209 }
6210 6210 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6211 6211 err = segvn_concat(seg, nseg, 1);
6212 6212 if (err == -1) {
6213 6213 return (EINVAL);
6214 6214 }
6215 6215 if (err == -2) {
6216 6216 return (IE_NOMEM);
6217 6217 }
6218 6218 return (IE_RETRY);
6219 6219 }
6220 6220
6221 6221 /*
6222 6222 * May need to re-align anon array to
6223 6223 * new szc.
6224 6224 */
6225 6225 if (amp != NULL) {
6226 6226 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6227 6227 struct anon_hdr *nahp;
6228 6228
6229 6229 ASSERT(svd->type == MAP_PRIVATE);
6230 6230
6231 6231 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6232 6232 ASSERT(amp->refcnt == 1);
6233 6233 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6234 6234 if (nahp == NULL) {
6235 6235 ANON_LOCK_EXIT(&amp->a_rwlock);
6236 6236 return (IE_NOMEM);
6237 6237 }
6238 6238 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6239 6239 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6240 6240 anon_release(nahp, btop(amp->size));
6241 6241 ANON_LOCK_EXIT(&amp->a_rwlock);
6242 6242 return (IE_NOMEM);
6243 6243 }
6244 6244 anon_release(amp->ahp, btop(amp->size));
6245 6245 amp->ahp = nahp;
6246 6246 svd->anon_index = 0;
6247 6247 ANON_LOCK_EXIT(&amp->a_rwlock);
6248 6248 }
6249 6249 }
6250 6250 if (svd->vp != NULL && szc != 0) {
6251 6251 struct vattr va;
6252 6252 u_offset_t eoffpage = svd->offset;
6253 6253 va.va_mask = AT_SIZE;
6254 6254 eoffpage += seg->s_size;
6255 6255 eoffpage = btopr(eoffpage);
6256 6256 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6257 6257 segvn_setpgsz_getattr_err++;
6258 6258 return (EINVAL);
6259 6259 }
6260 6260 if (btopr(va.va_size) < eoffpage) {
6261 6261 segvn_setpgsz_eof_err++;
6262 6262 return (EINVAL);
6263 6263 }
6264 6264 if (amp != NULL) {
6265 6265 /*
6266 6266 * anon_fill_cow_holes() may call VOP_GETPAGE().
6267 6267 * don't take anon map lock here to avoid holding it
6268 6268 * across VOP_GETPAGE() calls that may call back into
6269 6269 * segvn for klustering checks. We don't really need
6270 6270 * anon map lock here since it's a private segment and
6271 6271 * we hold as level lock as writers.
6272 6272 */
6273 6273 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6274 6274 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6275 6275 seg->s_size, szc, svd->prot, svd->vpage,
6276 6276 svd->cred)) != 0) {
6277 6277 return (EINVAL);
6278 6278 }
6279 6279 }
6280 6280 segvn_setvnode_mpss(svd->vp);
6281 6281 }
6282 6282
6283 6283 if (amp != NULL) {
6284 6284 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6285 6285 if (svd->type == MAP_PRIVATE) {
6286 6286 amp->a_szc = szc;
6287 6287 } else if (szc > amp->a_szc) {
6288 6288 amp->a_szc = szc;
6289 6289 }
6290 6290 ANON_LOCK_EXIT(&amp->a_rwlock);
6291 6291 }
6292 6292
6293 6293 seg->s_szc = szc;
6294 6294
6295 6295 return (0);
6296 6296 }
6297 6297
6298 6298 static int
6299 6299 segvn_clrszc(struct seg *seg)
6300 6300 {
6301 6301 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6302 6302 struct anon_map *amp = svd->amp;
6303 6303 size_t pgsz;
6304 6304 pgcnt_t pages;
6305 6305 int err = 0;
6306 6306 caddr_t a = seg->s_base;
6307 6307 caddr_t ea = a + seg->s_size;
6308 6308 ulong_t an_idx = svd->anon_index;
6309 6309 vnode_t *vp = svd->vp;
6310 6310 struct vpage *vpage = svd->vpage;
6311 6311 page_t *anon_pl[1 + 1], *pp;
6312 6312 struct anon *ap, *oldap;
6313 6313 uint_t prot = svd->prot, vpprot;
6314 6314 int pageflag = 0;
6315 6315
6316 6316 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6317 6317 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6318 6318 ASSERT(svd->softlockcnt == 0);
6319 6319
6320 6320 if (vp == NULL && amp == NULL) {
6321 6321 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6322 6322 seg->s_szc = 0;
6323 6323 return (0);
6324 6324 }
6325 6325
6326 6326 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6327 6327 ASSERT(svd->amp == NULL);
6328 6328 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6329 6329 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6330 6330 HAT_REGION_TEXT);
6331 6331 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6332 6332 } else if (svd->tr_state == SEGVN_TR_ON) {
6333 6333 ASSERT(svd->amp != NULL);
6334 6334 segvn_textunrepl(seg, 1);
6335 6335 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6336 6336 amp = NULL;
6337 6337 } else {
6338 6338 if (svd->tr_state != SEGVN_TR_OFF) {
6339 6339 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6340 6340 svd->tr_state = SEGVN_TR_OFF;
6341 6341 }
6342 6342
6343 6343 /*
6344 6344 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6345 6345 * unload argument is 0 when we are freeing the segment
6346 6346 * and unload was already done.
6347 6347 */
6348 6348 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6349 6349 HAT_UNLOAD_UNMAP);
6350 6350 }
6351 6351
6352 6352 if (amp == NULL || svd->type == MAP_SHARED) {
6353 6353 seg->s_szc = 0;
6354 6354 return (0);
6355 6355 }
6356 6356
6357 6357 pgsz = page_get_pagesize(seg->s_szc);
6358 6358 pages = btop(pgsz);
6359 6359
6360 6360 /*
6361 6361 * XXX anon rwlock is not really needed because this is a
6362 6362 * private segment and we are writers.
6363 6363 */
6364 6364 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6365 6365
6366 6366 for (; a < ea; a += pgsz, an_idx += pages) {
6367 6367 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6368 6368 ASSERT(vpage != NULL || svd->pageprot == 0);
6369 6369 if (vpage != NULL) {
6370 6370 ASSERT(sameprot(seg, a, pgsz));
6371 6371 prot = VPP_PROT(vpage);
6372 6372 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6373 6373 }
6374 6374 if (seg->s_szc != 0) {
6375 6375 ASSERT(vp == NULL || anon_pages(amp->ahp,
6376 6376 an_idx, pages) == pages);
6377 6377 if ((err = anon_map_demotepages(amp, an_idx,
6378 6378 seg, a, prot, vpage, svd->cred)) != 0) {
6379 6379 goto out;
6380 6380 }
6381 6381 } else {
6382 6382 if (oldap->an_refcnt == 1) {
6383 6383 continue;
6384 6384 }
6385 6385 if ((err = anon_getpage(&oldap, &vpprot,
6386 6386 anon_pl, PAGESIZE, seg, a, S_READ,
6387 6387 svd->cred))) {
6388 6388 goto out;
6389 6389 }
6390 6390 if ((pp = anon_private(&ap, seg, a, prot,
6391 6391 anon_pl[0], pageflag, svd->cred)) == NULL) {
6392 6392 err = ENOMEM;
6393 6393 goto out;
6394 6394 }
6395 6395 anon_decref(oldap);
6396 6396 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6397 6397 ANON_SLEEP);
6398 6398 page_unlock(pp);
6399 6399 }
6400 6400 }
6401 6401 vpage = (vpage == NULL) ? NULL : vpage + pages;
6402 6402 }
6403 6403
6404 6404 amp->a_szc = 0;
6405 6405 seg->s_szc = 0;
6406 6406 out:
6407 6407 ANON_LOCK_EXIT(&amp->a_rwlock);
6408 6408 return (err);
6409 6409 }
6410 6410
6411 6411 static int
6412 6412 segvn_claim_pages(
6413 6413 struct seg *seg,
6414 6414 struct vpage *svp,
6415 6415 u_offset_t off,
6416 6416 ulong_t anon_idx,
6417 6417 uint_t prot)
6418 6418 {
6419 6419 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6420 6420 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6421 6421 page_t **ppa;
6422 6422 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6423 6423 struct anon_map *amp = svd->amp;
6424 6424 struct vpage *evp = svp + pgcnt;
6425 6425 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6426 6426 + seg->s_base;
6427 6427 struct anon *ap;
6428 6428 struct vnode *vp = svd->vp;
6429 6429 page_t *pp;
6430 6430 pgcnt_t pg_idx, i;
6431 6431 int err = 0;
6432 6432 anoff_t aoff;
6433 6433 int anon = (amp != NULL) ? 1 : 0;
6434 6434
6435 6435 ASSERT(svd->type == MAP_PRIVATE);
6436 6436 ASSERT(svd->vpage != NULL);
6437 6437 ASSERT(seg->s_szc != 0);
6438 6438 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6439 6439 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6440 6440 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6441 6441
6442 6442 if (VPP_PROT(svp) == prot)
6443 6443 return (1);
6444 6444 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6445 6445 return (1);
6446 6446
6447 6447 ppa = kmem_alloc(ppasize, KM_SLEEP);
6448 6448 if (anon && vp != NULL) {
6449 6449 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6450 6450 anon = 0;
6451 6451 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6452 6452 }
6453 6453 ASSERT(!anon ||
6454 6454 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6455 6455 }
6456 6456
6457 6457 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6458 6458 if (!VPP_ISPPLOCK(svp))
6459 6459 continue;
6460 6460 if (anon) {
6461 6461 ap = anon_get_ptr(amp->ahp, anon_idx);
6462 6462 if (ap == NULL) {
6463 6463 panic("segvn_claim_pages: no anon slot");
6464 6464 }
6465 6465 swap_xlate(ap, &vp, &aoff);
6466 6466 off = (u_offset_t)aoff;
6467 6467 }
6468 6468 ASSERT(vp != NULL);
6469 6469 if ((pp = page_lookup(vp,
6470 6470 (u_offset_t)off, SE_SHARED)) == NULL) {
6471 6471 panic("segvn_claim_pages: no page");
6472 6472 }
6473 6473 ppa[pg_idx++] = pp;
6474 6474 off += PAGESIZE;
6475 6475 }
6476 6476
6477 6477 if (ppa[0] == NULL) {
6478 6478 kmem_free(ppa, ppasize);
6479 6479 return (1);
6480 6480 }
6481 6481
6482 6482 ASSERT(pg_idx <= pgcnt);
6483 6483 ppa[pg_idx] = NULL;
6484 6484
6485 6485
6486 6486 /* Find each large page within ppa, and adjust its claim */
6487 6487
6488 6488 /* Does ppa cover a single large page? */
6489 6489 if (ppa[0]->p_szc == seg->s_szc) {
6490 6490 if (prot & PROT_WRITE)
6491 6491 err = page_addclaim_pages(ppa);
6492 6492 else
6493 6493 err = page_subclaim_pages(ppa);
6494 6494 } else {
6495 6495 for (i = 0; ppa[i]; i += pgcnt) {
6496 6496 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6497 6497 if (prot & PROT_WRITE)
6498 6498 err = page_addclaim_pages(&ppa[i]);
6499 6499 else
6500 6500 err = page_subclaim_pages(&ppa[i]);
6501 6501 if (err == 0)
6502 6502 break;
6503 6503 }
6504 6504 }
6505 6505
6506 6506 for (i = 0; i < pg_idx; i++) {
6507 6507 ASSERT(ppa[i] != NULL);
6508 6508 page_unlock(ppa[i]);
6509 6509 }
6510 6510
6511 6511 kmem_free(ppa, ppasize);
6512 6512 return (err);
6513 6513 }
6514 6514
6515 6515 /*
6516 6516 * Returns right (upper address) segment if split occurred.
6517 6517 * If the address is equal to the beginning or end of its segment it returns
6518 6518 * the current segment.
6519 6519 */
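/*
 * Example (illustrative addresses): splitting a segment with s_base 0x10000
 * and s_size 0x30000 at addr 0x20000 shrinks the original to
 * [0x10000, 0x20000) and returns a new segment covering [0x20000, 0x40000);
 * passing addr equal to either end returns the original segment unchanged.
 */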
6520 6520 static struct seg *
6521 6521 segvn_split_seg(struct seg *seg, caddr_t addr)
6522 6522 {
6523 6523 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6524 6524 struct seg *nseg;
6525 6525 size_t nsize;
6526 6526 struct segvn_data *nsvd;
6527 6527
6528 6528 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6529 6529 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6530 6530
6531 6531 ASSERT(addr >= seg->s_base);
6532 6532 ASSERT(addr <= seg->s_base + seg->s_size);
6533 6533 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6534 6534
6535 6535 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6536 6536 return (seg);
6537 6537
6538 6538 nsize = seg->s_base + seg->s_size - addr;
6539 6539 seg->s_size = addr - seg->s_base;
6540 6540 nseg = seg_alloc(seg->s_as, addr, nsize);
6541 6541 ASSERT(nseg != NULL);
6542 6542 nseg->s_ops = seg->s_ops;
6543 6543 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6544 6544 nseg->s_data = (void *)nsvd;
6545 6545 nseg->s_szc = seg->s_szc;
6546 6546 *nsvd = *svd;
6547 6547 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6548 6548 nsvd->seg = nseg;
6549 6549 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6550 6550
6551 6551 if (nsvd->vp != NULL) {
6552 6552 VN_HOLD(nsvd->vp);
6553 6553 nsvd->offset = svd->offset +
6554 6554 (uintptr_t)(nseg->s_base - seg->s_base);
6555 6555 if (nsvd->type == MAP_SHARED)
6556 6556 lgrp_shm_policy_init(NULL, nsvd->vp);
6557 6557 } else {
6558 6558 /*
6559 6559 * The offset for an anonymous segment has no significance in
6560 6560 * terms of an offset into a file. If we were to use the above
6561 6561 * calculation instead, the structures read out of
6562 6562 * /proc/<pid>/xmap would be more difficult to decipher since
6563 6563 * it would be unclear whether two seemingly contiguous
6564 6564 * prxmap_t structures represented different segments or a
6565 6565 * single segment that had been split up into multiple prxmap_t
6566 6566 * structures (e.g. if some part of the segment had not yet
6567 6567 * been faulted in).
6568 6568 */
6569 6569 nsvd->offset = 0;
6570 6570 }
6571 6571
6572 6572 ASSERT(svd->softlockcnt == 0);
6573 6573 ASSERT(svd->softlockcnt_sbase == 0);
6574 6574 ASSERT(svd->softlockcnt_send == 0);
6575 6575 crhold(svd->cred);
6576 6576
6577 6577 if (svd->vpage != NULL) {
6578 6578 size_t bytes = vpgtob(seg_pages(seg));
6579 6579 size_t nbytes = vpgtob(seg_pages(nseg));
6580 6580 struct vpage *ovpage = svd->vpage;
6581 6581
6582 6582 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6583 6583 bcopy(ovpage, svd->vpage, bytes);
6584 6584 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6585 6585 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6586 6586 kmem_free(ovpage, bytes + nbytes);
6587 6587 }
6588 6588 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6589 6589 struct anon_map *oamp = svd->amp, *namp;
6590 6590 struct anon_hdr *nahp;
6591 6591
6592 6592 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6593 6593 ASSERT(oamp->refcnt == 1);
6594 6594 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6595 6595 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6596 6596 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6597 6597
6598 6598 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6599 6599 namp->a_szc = nseg->s_szc;
6600 6600 (void) anon_copy_ptr(oamp->ahp,
6601 6601 svd->anon_index + btop(seg->s_size),
6602 6602 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6603 6603 anon_release(oamp->ahp, btop(oamp->size));
6604 6604 oamp->ahp = nahp;
6605 6605 oamp->size = seg->s_size;
6606 6606 svd->anon_index = 0;
6607 6607 nsvd->amp = namp;
6608 6608 nsvd->anon_index = 0;
6609 6609 ANON_LOCK_EXIT(&oamp->a_rwlock);
6610 6610 } else if (svd->amp != NULL) {
6611 6611 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6612 6612 ASSERT(svd->amp == nsvd->amp);
6613 6613 ASSERT(seg->s_szc <= svd->amp->a_szc);
6614 6614 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6615 6615 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6616 6616 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6617 6617 svd->amp->refcnt++;
6618 6618 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6619 6619 }
6620 6620
6621 6621 /*
6622 6622 * Split the amount of swap reserved.
6623 6623 */
6624 6624 if (svd->swresv) {
6625 6625 /*
6626 6626 * For MAP_NORESERVE, only allocate swap reserve for pages
6627 6627 * being used. Other segments get enough to cover the whole
6628 6628 * segment.
6629 6629 */
6630 6630 if (svd->flags & MAP_NORESERVE) {
6631 6631 size_t oswresv;
6632 6632
6633 6633 ASSERT(svd->amp);
6634 6634 oswresv = svd->swresv;
6635 6635 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6636 6636 svd->anon_index, btop(seg->s_size)));
6637 6637 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6638 6638 nsvd->anon_index, btop(nseg->s_size)));
6639 6639 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6640 6640 } else {
6641 6641 if (svd->pageswap) {
6642 6642 svd->swresv = segvn_count_swap_by_vpages(seg);
6643 6643 ASSERT(nsvd->swresv >= svd->swresv);
6644 6644 nsvd->swresv -= svd->swresv;
6645 6645 } else {
6646 6646 ASSERT(svd->swresv == seg->s_size +
6647 6647 nseg->s_size);
6648 6648 svd->swresv = seg->s_size;
6649 6649 nsvd->swresv = nseg->s_size;
6650 6650 }
6651 6651 }
6652 6652 }
6653 6653
6654 6654 return (nseg);
6655 6655 }
6656 6656
6657 6657 /*
6658 6658 * called on memory operations (unmap, setprot, setpagesize) for a subset
6659 6659 * of a large page segment to either demote the memory range (SDR_RANGE)
6660 6660 * or the ends (SDR_END) by addr/len.
6661 6661 *
6662 6662 * returns 0 on success. returns errno, including ENOMEM, on failure.
6663 6663 */
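
For illustration, a minimal sketch of the address rounding this routine relies on. The helper below is hypothetical; it only mirrors the arithmetic that the CALC_LPG_REGION() macro performs (ignoring the clamping to segment boundaries the real macro also does). The range is widened outward to the enclosing large-page boundaries; SDR_END then demotes only the large page(s) covering the ends, while SDR_RANGE demotes the whole widened range.

    #include <sys/types.h>
    #include <sys/sysmacros.h>	/* P2ALIGN, P2ROUNDUP */

    /*
     * Hypothetical sketch: widen [addr, addr + len) to the enclosing
     * large-page-aligned region, with pgsz = page_get_pagesize(seg->s_szc).
     */
    static void
    lpg_region(caddr_t addr, size_t len, size_t pgsz,
        caddr_t *lpgaddr, caddr_t *lpgeaddr)
    {
    	*lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
    	*lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)(addr + len), pgsz);
    }
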
6664 6664 static int
6665 6665 segvn_demote_range(
6666 6666 struct seg *seg,
6667 6667 caddr_t addr,
6668 6668 size_t len,
6669 6669 int flag,
6670 6670 uint_t szcvec)
6671 6671 {
6672 6672 caddr_t eaddr = addr + len;
6673 6673 caddr_t lpgaddr, lpgeaddr;
6674 6674 struct seg *nseg;
6675 6675 struct seg *badseg1 = NULL;
6676 6676 struct seg *badseg2 = NULL;
6677 6677 size_t pgsz;
6678 6678 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6679 6679 int err;
6680 6680 uint_t szc = seg->s_szc;
6681 6681 uint_t tszcvec;
6682 6682
6683 6683 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6684 6684 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6685 6685 ASSERT(szc != 0);
6686 6686 pgsz = page_get_pagesize(szc);
6687 6687 ASSERT(seg->s_base != addr || seg->s_size != len);
6688 6688 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6689 6689 ASSERT(svd->softlockcnt == 0);
6690 6690 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6691 6691 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6692 6692
6693 6693 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6694 6694 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6695 6695 if (flag == SDR_RANGE) {
6696 6696 /* demote entire range */
6697 6697 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6698 6698 (void) segvn_split_seg(nseg, lpgeaddr);
6699 6699 ASSERT(badseg1->s_base == lpgaddr);
6700 6700 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6701 6701 } else if (addr != lpgaddr) {
6702 6702 ASSERT(flag == SDR_END);
6703 6703 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6704 6704 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6705 6705 eaddr < lpgaddr + 2 * pgsz) {
6706 6706 (void) segvn_split_seg(nseg, lpgeaddr);
6707 6707 ASSERT(badseg1->s_base == lpgaddr);
6708 6708 ASSERT(badseg1->s_size == 2 * pgsz);
6709 6709 } else {
6710 6710 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6711 6711 ASSERT(badseg1->s_base == lpgaddr);
6712 6712 ASSERT(badseg1->s_size == pgsz);
6713 6713 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6714 6714 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6715 6715 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6716 6716 badseg2 = nseg;
6717 6717 (void) segvn_split_seg(nseg, lpgeaddr);
6718 6718 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6719 6719 ASSERT(badseg2->s_size == pgsz);
6720 6720 }
6721 6721 }
6722 6722 } else {
6723 6723 ASSERT(flag == SDR_END);
6724 6724 ASSERT(eaddr < lpgeaddr);
6725 6725 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6726 6726 (void) segvn_split_seg(nseg, lpgeaddr);
6727 6727 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6728 6728 ASSERT(badseg1->s_size == pgsz);
6729 6729 }
6730 6730
6731 6731 ASSERT(badseg1 != NULL);
6732 6732 ASSERT(badseg1->s_szc == szc);
6733 6733 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6734 6734 badseg1->s_size == 2 * pgsz);
6735 6735 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6736 6736 ASSERT(badseg1->s_size == pgsz ||
6737 6737 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6738 6738 if (err = segvn_clrszc(badseg1)) {
6739 6739 return (err);
6740 6740 }
6741 6741 ASSERT(badseg1->s_szc == 0);
6742 6742
6743 6743 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6744 6744 uint_t tszc = highbit(tszcvec) - 1;
6745 6745 caddr_t ta = MAX(addr, badseg1->s_base);
6746 6746 caddr_t te;
6747 6747 size_t tpgsz = page_get_pagesize(tszc);
6748 6748
6749 6749 ASSERT(svd->type == MAP_SHARED);
6750 6750 ASSERT(flag == SDR_END);
6751 6751 ASSERT(tszc < szc && tszc > 0);
6752 6752
6753 6753 if (eaddr > badseg1->s_base + badseg1->s_size) {
6754 6754 te = badseg1->s_base + badseg1->s_size;
6755 6755 } else {
6756 6756 te = eaddr;
6757 6757 }
6758 6758
6759 6759 ASSERT(ta <= te);
6760 6760 badseg1->s_szc = tszc;
6761 6761 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6762 6762 if (badseg2 != NULL) {
6763 6763 err = segvn_demote_range(badseg1, ta, te - ta,
6764 6764 SDR_END, tszcvec);
6765 6765 if (err != 0) {
6766 6766 return (err);
6767 6767 }
6768 6768 } else {
6769 6769 return (segvn_demote_range(badseg1, ta,
6770 6770 te - ta, SDR_END, tszcvec));
6771 6771 }
6772 6772 }
6773 6773 }
6774 6774
6775 6775 if (badseg2 == NULL)
6776 6776 return (0);
6777 6777 ASSERT(badseg2->s_szc == szc);
6778 6778 ASSERT(badseg2->s_size == pgsz);
6779 6779 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6780 6780 if (err = segvn_clrszc(badseg2)) {
6781 6781 return (err);
6782 6782 }
6783 6783 ASSERT(badseg2->s_szc == 0);
6784 6784
6785 6785 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6786 6786 uint_t tszc = highbit(tszcvec) - 1;
6787 6787 size_t tpgsz = page_get_pagesize(tszc);
6788 6788
6789 6789 ASSERT(svd->type == MAP_SHARED);
6790 6790 ASSERT(flag == SDR_END);
6791 6791 ASSERT(tszc < szc && tszc > 0);
6792 6792 ASSERT(badseg2->s_base > addr);
6793 6793 ASSERT(eaddr > badseg2->s_base);
6794 6794 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6795 6795
6796 6796 badseg2->s_szc = tszc;
6797 6797 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6798 6798 return (segvn_demote_range(badseg2, badseg2->s_base,
6799 6799 eaddr - badseg2->s_base, SDR_END, tszcvec));
6800 6800 }
6801 6801 }
6802 6802
6803 6803 return (0);
6804 6804 }
6805 6805
6806 6806 static int
6807 6807 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6808 6808 {
6809 6809 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6810 6810 struct vpage *vp, *evp;
6811 6811
6812 6812 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6813 6813
6814 6814 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6815 6815 /*
6816 6816 * If segment protections can be used, simply check against them.
6817 6817 */
6818 6818 if (svd->pageprot == 0) {
6819 6819 int err;
6820 6820
6821 6821 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6822 6822 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6823 6823 return (err);
6824 6824 }
6825 6825
6826 6826 /*
6827 6827 * Have to check down to the vpage level.
6828 6828 */
6829 6829 evp = &svd->vpage[seg_page(seg, addr + len)];
6830 6830 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6831 6831 if ((VPP_PROT(vp) & prot) != prot) {
6832 6832 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6833 6833 return (EACCES);
6834 6834 }
6835 6835 }
6836 6836 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6837 6837 return (0);
6838 6838 }
6839 6839
6840 6840 static int
6841 6841 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6842 6842 {
6843 6843 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6844 6844 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6845 6845
6846 6846 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6847 6847
6848 6848 if (pgno != 0) {
6849 6849 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6850 6850 if (svd->pageprot == 0) {
6851 6851 do {
6852 6852 protv[--pgno] = svd->prot;
6853 6853 } while (pgno != 0);
6854 6854 } else {
6855 6855 size_t pgoff = seg_page(seg, addr);
6856 6856
6857 6857 do {
6858 6858 pgno--;
6859 6859 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6860 6860 } while (pgno != 0);
6861 6861 }
6862 6862 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6863 6863 }
6864 6864 return (0);
6865 6865 }
6866 6866
6867 6867 static u_offset_t
6868 6868 segvn_getoffset(struct seg *seg, caddr_t addr)
6869 6869 {
6870 6870 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6871 6871
6872 6872 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6873 6873
6874 6874 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6875 6875 }
6876 6876
6877 6877 /*ARGSUSED*/
6878 6878 static int
6879 6879 segvn_gettype(struct seg *seg, caddr_t addr)
6880 6880 {
6881 6881 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6882 6882
6883 6883 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6884 6884
6885 6885 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6886 6886 MAP_INITDATA)));
6887 6887 }
6888 6888
6889 6889 /*ARGSUSED*/
6890 6890 static int
6891 6891 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6892 6892 {
6893 6893 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6894 6894
6895 6895 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6896 6896
6897 6897 *vpp = svd->vp;
6898 6898 return (0);
6899 6899 }
6900 6900
6901 6901 /*
6902 6902 * Check to see if it makes sense to do kluster/read ahead to
6903 6903 * addr + delta relative to the mapping at addr. We assume here
6904 6904 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6905 6905 *
6906 6906 * For segvn, we currently "approve" of the action if we are
6907 6907 * still in the segment and it maps from the same vp/off,
6908 6908 * or if the advice stored in segvn_data or vpages allows it.
6909 6909 * Klustering is disallowed if MADV_RANDOM is set, or if MADV_SEQUENTIAL is set and delta is negative.
6910 6910 */
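
For context, a minimal sketch of how a read-ahead path might consult this entry point. The helper below is hypothetical; it assumes the SEGOP_KLUSTER() dispatch macro from <vm/seg.h>. A caller proposes a signed, page-aligned delta and klusters only when the segment driver returns 0.

    #include <sys/types.h>
    #include <sys/param.h>	/* PAGESIZE */
    #include <vm/seg.h>

    /*
     * Hypothetical sketch: ask the segment driver (segvn_kluster() for segvn
     * segments) whether the page at addr + PAGESIZE may be klustered with
     * the page at addr.  0 means yes; -1 means advice or bounds forbid it.
     */
    static int
    readahead_ok(struct seg *seg, caddr_t addr)
    {
    	return (SEGOP_KLUSTER(seg, addr, (ssize_t)PAGESIZE) == 0);
    }
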
6911 6911 static int
6912 6912 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6913 6913 {
6914 6914 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6915 6915 struct anon *oap, *ap;
6916 6916 ssize_t pd;
6917 6917 size_t page;
6918 6918 struct vnode *vp1, *vp2;
6919 6919 u_offset_t off1, off2;
6920 6920 struct anon_map *amp;
6921 6921
6922 6922 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6923 6923 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6924 6924 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
6925 6925
6926 6926 if (addr + delta < seg->s_base ||
6927 6927 addr + delta >= (seg->s_base + seg->s_size))
6928 6928 return (-1); /* exceeded segment bounds */
6929 6929
6930 6930 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
6931 6931 page = seg_page(seg, addr);
6932 6932
6933 6933 /*
6934 6934 * Check to see if either of the pages addr or addr + delta
6935 6935 * have advice set that prevents klustering (if MADV_RANDOM advice
6936 6936 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
6937 6937 * is negative).
6938 6938 */
6939 6939 if (svd->advice == MADV_RANDOM ||
6940 6940 svd->advice == MADV_SEQUENTIAL && delta < 0)
6941 6941 return (-1);
6942 6942 else if (svd->pageadvice && svd->vpage) {
6943 6943 struct vpage *bvpp, *evpp;
6944 6944
6945 6945 bvpp = &svd->vpage[page];
6946 6946 evpp = &svd->vpage[page + pd];
6947 6947 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
6948 6948 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
6949 6949 return (-1);
6950 6950 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
6951 6951 VPP_ADVICE(evpp) == MADV_RANDOM)
6952 6952 return (-1);
6953 6953 }
6954 6954
6955 6955 if (svd->type == MAP_SHARED)
6956 6956 return (0); /* shared mapping - all ok */
6957 6957
6958 6958 if ((amp = svd->amp) == NULL)
6959 6959 return (0); /* off original vnode */
6960 6960
6961 6961 page += svd->anon_index;
6962 6962
6963 6963 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
6964 6964
6965 6965 oap = anon_get_ptr(amp->ahp, page);
6966 6966 ap = anon_get_ptr(amp->ahp, page + pd);
6967 6967
6968 6968 ANON_LOCK_EXIT(&amp->a_rwlock);
6969 6969
6970 6970 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
6971 6971 return (-1); /* one with and one without an anon */
6972 6972 }
6973 6973
6974 6974 if (oap == NULL) { /* implies that ap == NULL */
6975 6975 return (0); /* off original vnode */
6976 6976 }
6977 6977
6978 6978 /*
6979 6979 * Now we know we have two anon pointers - check to
6980 6980 * see if they happen to be properly allocated.
6981 6981 */
6982 6982
6983 6983 /*
6984 6984 * XXX We cheat here and don't lock the anon slots. We can't because
6985 6985 * we may have been called from the anon layer which might already
6986 6986 * have locked them. We are holding a refcnt on the slots so they
6987 6987 * can't disappear. The worst that will happen is we'll get the wrong
6988 6988 * names (vp, off) for the slots and make a poor klustering decision.
6989 6989 */
6990 6990 swap_xlate(ap, &vp1, &off1);
6991 6991 swap_xlate(oap, &vp2, &off2);
6992 6992
6993 6993
6994 6994 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
6995 6995 return (-1);
6996 6996 return (0);
6997 6997 }
6998 6998
6999 6999 /*
7000 7000 * Swap the pages of seg out to secondary storage, returning the
7001 7001 * number of bytes of storage freed.
7002 7002 *
7003 7003 * The basic idea is first to unload all translations and then to call
7004 7004 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
7005 7005 * swap device. Pages to which other segments have mappings will remain
7006 7006 * mapped and won't be swapped. Our caller (as_swapout) has already
7007 7007 * performed the unloading step.
7008 7008 *
7009 7009 * The value returned is intended to correlate well with the process's
7010 7010 * memory requirements. However, there are some caveats:
7011 7011 * 1) When given a shared segment as argument, this routine will
7012 7012 * only succeed in swapping out pages for the last sharer of the
7013 7013 * segment. (Previous callers will only have decremented mapping
7014 7014 * reference counts.)
7015 7015 * 2) We assume that the hat layer maintains a large enough translation
7016 7016 * cache to capture process reference patterns.
7017 7017 */
7018 7018 static size_t
7019 7019 segvn_swapout(struct seg *seg)
7020 7020 {
7021 7021 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7022 7022 struct anon_map *amp;
7023 7023 pgcnt_t pgcnt = 0;
7024 7024 pgcnt_t npages;
7025 7025 pgcnt_t page;
7026 7026 ulong_t anon_index;
7027 7027
7028 7028 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7029 7029
7030 7030 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7031 7031 /*
7032 7032 * Find pages unmapped by our caller and force them
7033 7033 * out to the virtual swap device.
7034 7034 */
7035 7035 if ((amp = svd->amp) != NULL)
7036 7036 anon_index = svd->anon_index;
7037 7037 npages = seg->s_size >> PAGESHIFT;
7038 7038 for (page = 0; page < npages; page++) {
7039 7039 page_t *pp;
7040 7040 struct anon *ap;
7041 7041 struct vnode *vp;
7042 7042 u_offset_t off;
7043 7043 anon_sync_obj_t cookie;
7044 7044
7045 7045 /*
7046 7046 * Obtain <vp, off> pair for the page, then look it up.
7047 7047 *
7048 7048 * Note that this code is willing to consider regular
7049 7049 * pages as well as anon pages. Is this appropriate here?
7050 7050 */
7051 7051 ap = NULL;
7052 7052 if (amp != NULL) {
7053 7053 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7054 7054 if (anon_array_try_enter(amp, anon_index + page,
7055 7055 &cookie)) {
7056 7056 ANON_LOCK_EXIT(&amp->a_rwlock);
7057 7057 continue;
7058 7058 }
7059 7059 ap = anon_get_ptr(amp->ahp, anon_index + page);
7060 7060 if (ap != NULL) {
7061 7061 swap_xlate(ap, &vp, &off);
7062 7062 } else {
7063 7063 vp = svd->vp;
7064 7064 off = svd->offset + ptob(page);
7065 7065 }
7066 7066 anon_array_exit(&cookie);
7067 7067 ANON_LOCK_EXIT(&amp->a_rwlock);
7068 7068 } else {
7069 7069 vp = svd->vp;
7070 7070 off = svd->offset + ptob(page);
7071 7071 }
7072 7072 if (vp == NULL) { /* untouched zfod page */
7073 7073 ASSERT(ap == NULL);
7074 7074 continue;
7075 7075 }
7076 7076
7077 7077 pp = page_lookup_nowait(vp, off, SE_SHARED);
7078 7078 if (pp == NULL)
7079 7079 continue;
7080 7080
7081 7081
7082 7082 /*
7083 7083 * Examine the page to see whether it can be tossed out,
7084 7084 * keeping track of how many we've found.
7085 7085 */
7086 7086 if (!page_tryupgrade(pp)) {
7087 7087 /*
7088 7088 * If the page has an i/o lock and no mappings,
7089 7089 * it's very likely that the page is being
7090 7090 * written out as a result of klustering.
7091 7091 * Assume this is so and take credit for it here.
7092 7092 */
7093 7093 if (!page_io_trylock(pp)) {
7094 7094 if (!hat_page_is_mapped(pp))
7095 7095 pgcnt++;
7096 7096 } else {
7097 7097 page_io_unlock(pp);
7098 7098 }
7099 7099 page_unlock(pp);
7100 7100 continue;
7101 7101 }
7102 7102 ASSERT(!page_iolock_assert(pp));
7103 7103
7104 7104
7105 7105 /*
7106 7106 * Skip if page is locked or has mappings.
7107 7107 * We don't need the page_struct_lock to look at lckcnt
7108 7108 * and cowcnt because the page is exclusive locked.
7109 7109 */
7110 7110 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
7111 7111 hat_page_is_mapped(pp)) {
7112 7112 page_unlock(pp);
7113 7113 continue;
7114 7114 }
7115 7115
7116 7116 /*
7117 7117 * dispose skips large pages so try to demote first.
7118 7118 */
7119 7119 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
7120 7120 page_unlock(pp);
7121 7121 /*
7122 7122 * XXX should skip the remaining page_t's of this
7123 7123 * large page.
7124 7124 */
7125 7125 continue;
7126 7126 }
7127 7127
7128 7128 ASSERT(pp->p_szc == 0);
7129 7129
7130 7130 /*
7131 7131 * No longer mapped -- we can toss it out. How
7132 7132 * we do so depends on whether or not it's dirty.
7133 7133 */
7134 7134 if (hat_ismod(pp) && pp->p_vnode) {
7135 7135 /*
7136 7136 * We must clean the page before it can be
7137 7137 * freed. Setting B_FREE will cause pvn_done
7138 7138 * to free the page when the i/o completes.
7139 7139 * XXX: This also causes it to be accounted
7140 7140 * as a pageout instead of a swap: need
7141 7141 * B_SWAPOUT bit to use instead of B_FREE.
7142 7142 *
7143 7143 * Hold the vnode before releasing the page lock
7144 7144 * to prevent it from being freed and re-used by
7145 7145 * some other thread.
7146 7146 */
7147 7147 VN_HOLD(vp);
7148 7148 page_unlock(pp);
7149 7149
7150 7150 /*
7151 7151 * Queue all i/o requests for the pageout thread
7152 7152 * to avoid saturating the pageout devices.
7153 7153 */
7154 7154 if (!queue_io_request(vp, off))
7155 7155 VN_RELE(vp);
7156 7156 } else {
7157 7157 /*
7158 7158 * The page was clean, free it.
7159 7159 *
7160 7160 * XXX: Can we ever encounter modified pages
7161 7161 * with no associated vnode here?
7162 7162 */
7163 7163 ASSERT(pp->p_vnode != NULL);
7164 7164 /*LINTED: constant in conditional context*/
7165 7165 VN_DISPOSE(pp, B_FREE, 0, kcred);
7166 7166 }
7167 7167
7168 7168 /*
7169 7169 * Credit now even if i/o is in progress.
7170 7170 */
7171 7171 pgcnt++;
7172 7172 }
7173 7173 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7174 7174
7175 7175 /*
7176 7176 * Wakeup pageout to initiate i/o on all queued requests.
7177 7177 */
7178 7178 cv_signal_pageout();
7179 7179 return (ptob(pgcnt));
7180 7180 }
7181 7181
7182 7182 /*
7183 7183 * Synchronize primary storage cache with real object in virtual memory.
7184 7184 *
7185 7185 * XXX - Anonymous pages should not be sync'ed out at all.
7186 7186 */
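
This routine is what ultimately services msync(3C) on a segvn mapping: MS_ASYNC and MS_INVALIDATE from the caller are translated into the B_ASYNC and B_INVAL bflags computed below. A minimal userland sketch (the file name and length are arbitrary):

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
    	int fd = open("/tmp/example", O_RDWR | O_CREAT, 0644);
    	char *p;

    	if (fd < 0 || ftruncate(fd, 8192) != 0)
    		return (1);
    	p = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    	if (p == MAP_FAILED)
    		return (1);
    	p[0] = 'x';
    	/* Push dirty pages back to the file; B_ASYNC, so don't wait. */
    	if (msync(p, 8192, MS_ASYNC) != 0)
    		perror("msync");
    	(void) munmap(p, 8192);
    	(void) close(fd);
    	return (0);
    }
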
7187 7187 static int
7188 7188 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7189 7189 {
7190 7190 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7191 7191 struct vpage *vpp;
7192 7192 page_t *pp;
7193 7193 u_offset_t offset;
7194 7194 struct vnode *vp;
7195 7195 u_offset_t off;
7196 7196 caddr_t eaddr;
7197 7197 int bflags;
7198 7198 int err = 0;
7199 7199 int segtype;
7200 7200 int pageprot;
7201 7201 int prot;
7202 7202 ulong_t anon_index;
7203 7203 struct anon_map *amp;
7204 7204 struct anon *ap;
7205 7205 anon_sync_obj_t cookie;
7206 7206
7207 7207 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7208 7208
7209 7209 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7210 7210
7211 7211 if (svd->softlockcnt > 0) {
7212 7212 /*
7213 7213 * If this is a shared segment, a non-zero softlockcnt
7214 7214 * means locked pages are still in use.
7215 7215 */
7216 7216 if (svd->type == MAP_SHARED) {
7217 7217 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7218 7218 return (EAGAIN);
7219 7219 }
7220 7220
7221 7221 /*
7222 7222 * flush all pages from seg cache
7223 7223 * otherwise we may deadlock in swap_putpage
7224 7224 * for B_INVAL page (4175402).
7225 7225 *
7226 7226 * Even if we grab segvn WRITER's lock
7227 7227 * here, there might be another thread which could've
7228 7228 * successfully performed lookup/insert just before
7229 7229 * we acquired the lock here. So, grabbing either
7230 7230 * lock here is not of much use. Until we devise
7231 7231 * a strategy at upper layers to solve the
7232 7232 * synchronization issues completely, we expect
7233 7233 * applications to handle this appropriately.
7234 7234 */
7235 7235 segvn_purge(seg);
7236 7236 if (svd->softlockcnt > 0) {
7237 7237 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7238 7238 return (EAGAIN);
7239 7239 }
7240 7240 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7241 7241 svd->amp->a_softlockcnt > 0) {
7242 7242 /*
7243 7243 * Try to purge this amp's entries from pcache. It will
7244 7244 * succeed only if other segments that share the amp have no
7245 7245 * outstanding softlock's.
7246 7246 */
7247 7247 segvn_purge(seg);
7248 7248 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7249 7249 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7250 7250 return (EAGAIN);
7251 7251 }
7252 7252 }
7253 7253
7254 7254 vpp = svd->vpage;
7255 7255 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7256 7256 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7257 7257 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7258 7258
7259 7259 if (attr) {
7260 7260 pageprot = attr & ~(SHARED|PRIVATE);
7261 7261 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7262 7262
7263 7263 /*
7264 7264 * We are done if the segment types don't match
7265 7265 * or if we have segment level protections and
7266 7266 * they don't match.
7267 7267 */
7268 7268 if (svd->type != segtype) {
7269 7269 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7270 7270 return (0);
7271 7271 }
7272 7272 if (vpp == NULL) {
7273 7273 if (svd->prot != pageprot) {
7274 7274 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7275 7275 return (0);
7276 7276 }
7277 7277 prot = svd->prot;
7278 7278 } else
7279 7279 vpp = &svd->vpage[seg_page(seg, addr)];
7280 7280
7281 7281 } else if (svd->vp && svd->amp == NULL &&
7282 7282 (flags & MS_INVALIDATE) == 0) {
7283 7283
7284 7284 /*
7285 7285 * No attributes, no anonymous pages and MS_INVALIDATE flag
7286 7286 * is not on, just use one big request.
7287 7287 */
7288 7288 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7289 7289 bflags, svd->cred, NULL);
7290 7290 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7291 7291 return (err);
7292 7292 }
7293 7293
7294 7294 if ((amp = svd->amp) != NULL)
7295 7295 anon_index = svd->anon_index + seg_page(seg, addr);
7296 7296
7297 7297 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7298 7298 ap = NULL;
7299 7299 if (amp != NULL) {
7300 7300 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7301 7301 anon_array_enter(amp, anon_index, &cookie);
7302 7302 ap = anon_get_ptr(amp->ahp, anon_index++);
7303 7303 if (ap != NULL) {
7304 7304 swap_xlate(ap, &vp, &off);
7305 7305 } else {
7306 7306 vp = svd->vp;
7307 7307 off = offset;
7308 7308 }
7309 7309 anon_array_exit(&cookie);
7310 7310 ANON_LOCK_EXIT(&amp->a_rwlock);
7311 7311 } else {
7312 7312 vp = svd->vp;
7313 7313 off = offset;
7314 7314 }
7315 7315 offset += PAGESIZE;
7316 7316
7317 7317 if (vp == NULL) /* untouched zfod page */
7318 7318 continue;
7319 7319
7320 7320 if (attr) {
7321 7321 if (vpp) {
7322 7322 prot = VPP_PROT(vpp);
7323 7323 vpp++;
7324 7324 }
7325 7325 if (prot != pageprot) {
7326 7326 continue;
7327 7327 }
7328 7328 }
7329 7329
7330 7330 /*
7331 7331 * See if any of these pages are locked -- if so, then we
7332 7332 * will have to truncate an invalidate request at the first
7333 7333 * locked one. We don't need the page_struct_lock to test
7334 7334 * as this is only advisory; even if we acquire it someone
7335 7335 * might race in and lock the page after we unlock and before
7336 7336 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7337 7337 */
7338 7338 if (flags & MS_INVALIDATE) {
7339 7339 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7340 7340 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7341 7341 page_unlock(pp);
7342 7342 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7343 7343 return (EBUSY);
7344 7344 }
7345 7345 if (ap != NULL && pp->p_szc != 0 &&
7346 7346 page_tryupgrade(pp)) {
7347 7347 if (pp->p_lckcnt == 0 &&
7348 7348 pp->p_cowcnt == 0) {
7349 7349 /*
7350 7350 * swapfs VN_DISPOSE() won't
7351 7351 * invalidate large pages.
7352 7352 * Attempt to demote.
7353 7353 * XXX can't help it if it
7354 7354 * fails. But for swapfs
7355 7355 * pages it is no big deal.
7356 7356 */
7357 7357 (void) page_try_demote_pages(
7358 7358 pp);
7359 7359 }
7360 7360 }
7361 7361 page_unlock(pp);
7362 7362 }
7363 7363 } else if (svd->type == MAP_SHARED && amp != NULL) {
7364 7364 /*
7365 7365 * Avoid writing out to disk ISM's large pages
7366 7366 * because segspt_free_pages() relies on NULL an_pvp
7367 7367 * of anon slots of such pages.
7368 7368 */
7369 7369
7370 7370 ASSERT(svd->vp == NULL);
7371 7371 /*
7372 7372 * swapfs uses page_lookup_nowait if not freeing or
7373 7373 * invalidating and skips a page if
7374 7374 * page_lookup_nowait returns NULL.
7375 7375 */
7376 7376 pp = page_lookup_nowait(vp, off, SE_SHARED);
7377 7377 if (pp == NULL) {
7378 7378 continue;
7379 7379 }
7380 7380 if (pp->p_szc != 0) {
7381 7381 page_unlock(pp);
7382 7382 continue;
7383 7383 }
7384 7384
7385 7385 /*
7386 7386 * Note ISM pages are created large so (vp, off)'s
7387 7387 * page cannot suddenly become large after we unlock
7388 7388 * pp.
7389 7389 */
7390 7390 page_unlock(pp);
7391 7391 }
7392 7392 /*
7393 7393 * XXX - Should ultimately try to kluster
7394 7394 * calls to VOP_PUTPAGE() for performance.
7395 7395 */
7396 7396 VN_HOLD(vp);
7397 7397 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7398 7398 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7399 7399 svd->cred, NULL);
7400 7400
7401 7401 VN_RELE(vp);
7402 7402 if (err)
7403 7403 break;
7404 7404 }
7405 7405 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7406 7406 return (err);
7407 7407 }
7408 7408
7409 7409 /*
7410 7410 * Determine if we have data corresponding to pages in the
7411 7411 * primary storage virtual memory cache (i.e., "in core").
7412 7412 */
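
segvn_incore() backs mincore(2) for segvn mappings; the richer SEG_PAGE_* bits gathered below are also consumed elsewhere in the kernel. A minimal userland sketch of the syscall it services:

    #include <sys/types.h>
    #include <sys/mman.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int
    main(void)
    {
    	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
    	size_t len = 4 * pgsz;
    	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
    	    MAP_PRIVATE | MAP_ANON, -1, 0);
    	char *vec = malloc(len / pgsz);
    	size_t i;

    	if (p == MAP_FAILED || vec == NULL)
    		return (1);
    	p[0] = 'x';			/* touch only the first page */
    	if (mincore(p, len, vec) != 0)
    		return (1);
    	for (i = 0; i < len / pgsz; i++)
    		printf("page %lu: %s\n", (unsigned long)i,
    		    (vec[i] & 1) ? "in core" : "not in core");
    	return (0);
    }
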
7413 7413 static size_t
7414 7414 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7415 7415 {
7416 7416 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7417 7417 struct vnode *vp, *avp;
7418 7418 u_offset_t offset, aoffset;
7419 7419 size_t p, ep;
7420 7420 int ret;
7421 7421 struct vpage *vpp;
7422 7422 page_t *pp;
7423 7423 uint_t start;
7424 7424 struct anon_map *amp; /* XXX - for locknest */
7425 7425 struct anon *ap;
7426 7426 uint_t attr;
7427 7427 anon_sync_obj_t cookie;
7428 7428
7429 7429 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7430 7430
7431 7431 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7432 7432 if (svd->amp == NULL && svd->vp == NULL) {
7433 7433 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7434 7434 bzero(vec, btopr(len));
7435 7435 return (len); /* no anonymous pages created yet */
7436 7436 }
7437 7437
7438 7438 p = seg_page(seg, addr);
7439 7439 ep = seg_page(seg, addr + len);
7440 7440 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7441 7441
7442 7442 amp = svd->amp;
7443 7443 for (; p < ep; p++, addr += PAGESIZE) {
7444 7444 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7445 7445 ret = start;
7446 7446 ap = NULL;
7447 7447 avp = NULL;
7448 7448 /* Grab the vnode/offset for the anon slot */
7449 7449 if (amp != NULL) {
7450 7450 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7451 7451 anon_array_enter(amp, svd->anon_index + p, &cookie);
7452 7452 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7453 7453 if (ap != NULL) {
7454 7454 swap_xlate(ap, &avp, &aoffset);
7455 7455 }
7456 7456 anon_array_exit(&cookie);
7457 7457 ANON_LOCK_EXIT(&amp->a_rwlock);
7458 7458 }
7459 7459 if ((avp != NULL) && page_exists(avp, aoffset)) {
7460 7460 /* A page exists for the anon slot */
7461 7461 ret |= SEG_PAGE_INCORE;
7462 7462
7463 7463 /*
7464 7464 * If page is mapped and writable
7465 7465 */
7466 7466 attr = (uint_t)0;
7467 7467 if ((hat_getattr(seg->s_as->a_hat, addr,
7468 7468 &attr) != -1) && (attr & PROT_WRITE)) {
7469 7469 ret |= SEG_PAGE_ANON;
7470 7470 }
7471 7471 /*
7472 7472 * Don't get page_struct lock for lckcnt and cowcnt,
7473 7473 * since this is purely advisory.
7474 7474 */
7475 7475 if ((pp = page_lookup_nowait(avp, aoffset,
7476 7476 SE_SHARED)) != NULL) {
7477 7477 if (pp->p_lckcnt)
7478 7478 ret |= SEG_PAGE_SOFTLOCK;
7479 7479 if (pp->p_cowcnt)
7480 7480 ret |= SEG_PAGE_HASCOW;
7481 7481 page_unlock(pp);
7482 7482 }
7483 7483 }
7484 7484
7485 7485 /* Gather vnode statistics */
7486 7486 vp = svd->vp;
7487 7487 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7488 7488
7489 7489 if (vp != NULL) {
7490 7490 /*
7491 7491 * Try to obtain a "shared" lock on the page
7492 7492 * without blocking. If this fails, determine
7493 7493 * if the page is in memory.
7494 7494 */
7495 7495 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7496 7496 if ((pp == NULL) && (page_exists(vp, offset))) {
7497 7497 /* Page is incore, and is named */
7498 7498 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7499 7499 }
7500 7500 /*
7501 7501 * Don't get page_struct lock for lckcnt and cowcnt,
7502 7502 * since this is purely advisory.
7503 7503 */
7504 7504 if (pp != NULL) {
7505 7505 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7506 7506 if (pp->p_lckcnt)
7507 7507 ret |= SEG_PAGE_SOFTLOCK;
7508 7508 if (pp->p_cowcnt)
7509 7509 ret |= SEG_PAGE_HASCOW;
7510 7510 page_unlock(pp);
7511 7511 }
7512 7512 }
7513 7513
7514 7514 /* Gather virtual page information */
7515 7515 if (vpp) {
7516 7516 if (VPP_ISPPLOCK(vpp))
7517 7517 ret |= SEG_PAGE_LOCKED;
7518 7518 vpp++;
7519 7519 }
7520 7520
7521 7521 *vec++ = (char)ret;
7522 7522 }
7523 7523 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7524 7524 return (len);
7525 7525 }
7526 7526
7527 7527 /*
7528 7528 * Statement for p_cowcnts/p_lckcnts.
7529 7529 *
7530 7530 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
7531 7531 * irrespective of the following factors or anything else:
7532 7532 *
7533 7533 * (1) anon slots are populated or not
7534 7534 * (2) cow is broken or not
7535 7535 * (3) refcnt on ap is 1 or greater than 1
7536 7536 *
7537 7537 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7538 7538 * and munlock.
7539 7539 *
7540 7540 *
7541 7541 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7542 7542 *
7543 7543 * if vpage has PROT_WRITE
7544 7544 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7545 7545 * else
7546 7546 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7547 7547 *
7548 7548 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7549 7549 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7550 7550 *
7551 7551 * We may also break COW if softlocking on read access in the physio case.
7552 7552 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7553 7553 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7554 7554 * vpage doesn't have PROT_WRITE.
7555 7555 *
7556 7556 *
7557 7557 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7558 7558 *
7559 7559 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7560 7560 * increment p_lckcnt by calling page_subclaim() which takes care of
7561 7561 * availrmem accounting and p_lckcnt overflow.
7562 7562 *
7563 7563 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7564 7564 * increment p_cowcnt by calling page_addclaim() which takes care of
7565 7565 * availrmem availability and p_cowcnt overflow.
7566 7566 */
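
A compressed restatement of the copy-on-write transfer rule above, as a sketch. The helper below is hypothetical and the page_pp_useclaim() call is modelled on the anon-layer copy-on-write path; treat the exact call site as an assumption rather than a quote of that code.

    #include <sys/mman.h>	/* PROT_WRITE */
    #include <vm/page.h>
    #include <vm/vpage.h>

    /*
     * Hypothetical sketch: when a private copy (npp) replaces the original
     * page (opp) of an mlocked mapping, move the claim with it.  With a
     * writable vpage the p_cowcnt moves; otherwise the p_lckcnt moves.
     */
    static void
    transfer_lock_claim(page_t *opp, page_t *npp, struct vpage *vpage)
    {
    	if (vpage != NULL && VPP_ISPPLOCK(vpage))
    		page_pp_useclaim(opp, npp, VPP_PROT(vpage) & PROT_WRITE);
    }
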
7567 7567
7568 7568 /*
7569 7569 * Lock down (or unlock) pages mapped by this segment.
7570 7570 *
7571 7571 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7572 7572 * At fault time they will be relocated into larger pages.
7573 7573 */
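
segvn_lockop() is reached from memcntl(2) and mlock(3C)/munlock(3C) with op set to MC_LOCK or MC_UNLOCK. A minimal userland sketch of the operation it implements:

    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
    	size_t len = 2 * (size_t)sysconf(_SC_PAGESIZE);
    	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
    	    MAP_PRIVATE | MAP_ANON, -1, 0);

    	if (p == MAP_FAILED)
    		return (1);
    	/* MC_LOCK: fault the pages in and take per-page lock claims. */
    	if (mlock(p, len) != 0) {
    		perror("mlock");
    		return (1);
    	}
    	p[0] = 'x';		/* resident while locked */
    	/* MC_UNLOCK: drop the per-page lock claims again. */
    	if (munlock(p, len) != 0)
    		perror("munlock");
    	(void) munmap(p, len);
    	return (0);
    }
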
7574 7574 static int
7575 7575 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7576 7576 int attr, int op, ulong_t *lockmap, size_t pos)
7577 7577 {
7578 7578 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7579 7579 struct vpage *vpp;
7580 7580 struct vpage *evp;
7581 7581 page_t *pp;
7582 7582 u_offset_t offset;
7583 7583 u_offset_t off;
7584 7584 int segtype;
7585 7585 int pageprot;
7586 7586 int claim;
7587 7587 struct vnode *vp;
7588 7588 ulong_t anon_index;
7589 7589 struct anon_map *amp;
7590 7590 struct anon *ap;
7591 7591 struct vattr va;
7592 7592 anon_sync_obj_t cookie;
7593 7593 struct kshmid *sp = NULL;
7594 7594 struct proc *p = curproc;
7595 7595 kproject_t *proj = NULL;
7596 7596 int chargeproc = 1;
7597 7597 size_t locked_bytes = 0;
7598 7598 size_t unlocked_bytes = 0;
7599 7599 int err = 0;
7600 7600
7601 7601 /*
7602 7602 * Hold write lock on address space because we may split or concatenate
7603 7603 * segments
7604 7604 */
7605 7605 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7606 7606
7607 7607 /*
7608 7608 * If this is a shm, use shm's project and zone, else use
7609 7609 * project and zone of calling process
7610 7610 */
7611 7611
7612 7612 /* Determine if this segment backs a sysV shm */
7613 7613 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7614 7614 ASSERT(svd->type == MAP_SHARED);
7615 7615 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7616 7616 sp = svd->amp->a_sp;
7617 7617 proj = sp->shm_perm.ipc_proj;
7618 7618 chargeproc = 0;
7619 7619 }
7620 7620
7621 7621 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7622 7622 if (attr) {
7623 7623 pageprot = attr & ~(SHARED|PRIVATE);
7624 7624 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7625 7625
7626 7626 /*
7627 7627 * We are done if the segment types don't match
7628 7628 * or if we have segment level protections and
7629 7629 * they don't match.
7630 7630 */
7631 7631 if (svd->type != segtype) {
7632 7632 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7633 7633 return (0);
7634 7634 }
7635 7635 if (svd->pageprot == 0 && svd->prot != pageprot) {
7636 7636 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7637 7637 return (0);
7638 7638 }
7639 7639 }
7640 7640
7641 7641 if (op == MC_LOCK) {
7642 7642 if (svd->tr_state == SEGVN_TR_INIT) {
7643 7643 svd->tr_state = SEGVN_TR_OFF;
7644 7644 } else if (svd->tr_state == SEGVN_TR_ON) {
7645 7645 ASSERT(svd->amp != NULL);
7646 7646 segvn_textunrepl(seg, 0);
7647 7647 ASSERT(svd->amp == NULL &&
7648 7648 svd->tr_state == SEGVN_TR_OFF);
7649 7649 }
7650 7650 }
7651 7651
7652 7652 /*
7653 7653 * If we're locking, then we must create a vpage structure if
7654 7654 * none exists. If we're unlocking, then check to see if there
7655 7655 * is a vpage -- if not, then we could not have locked anything.
7656 7656 */
7657 7657
7658 7658 if ((vpp = svd->vpage) == NULL) {
7659 7659 if (op == MC_LOCK)
7660 7660 segvn_vpage(seg);
7661 7661 else {
7662 7662 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7663 7663 return (0);
7664 7664 }
7665 7665 }
7666 7666
7667 7667 /*
7668 7668 * The anonymous data vector (i.e., previously
7669 7669 * unreferenced mapping to swap space) can be allocated
7670 7670 * by lazily testing for its existence.
7671 7671 */
7672 7672 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7673 7673 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7674 7674 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7675 7675 svd->amp->a_szc = seg->s_szc;
7676 7676 }
7677 7677
7678 7678 if ((amp = svd->amp) != NULL) {
7679 7679 anon_index = svd->anon_index + seg_page(seg, addr);
7680 7680 }
7681 7681
7682 7682 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7683 7683 evp = &svd->vpage[seg_page(seg, addr + len)];
7684 7684
7685 7685 if (sp != NULL)
7686 7686 mutex_enter(&sp->shm_mlock);
7687 7687
7688 7688 /* determine number of unlocked bytes in range for lock operation */
7689 7689 if (op == MC_LOCK) {
7690 7690
7691 7691 if (sp == NULL) {
7692 7692 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7693 7693 vpp++) {
7694 7694 if (!VPP_ISPPLOCK(vpp))
7695 7695 unlocked_bytes += PAGESIZE;
7696 7696 }
7697 7697 } else {
7698 7698 ulong_t i_idx, i_edx;
7699 7699 anon_sync_obj_t i_cookie;
7700 7700 struct anon *i_ap;
7701 7701 struct vnode *i_vp;
7702 7702 u_offset_t i_off;
7703 7703
7704 7704 /* Only count sysV pages once for locked memory */
7705 7705 i_edx = svd->anon_index + seg_page(seg, addr + len);
7706 7706 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7707 7707 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7708 7708 anon_array_enter(amp, i_idx, &i_cookie);
7709 7709 i_ap = anon_get_ptr(amp->ahp, i_idx);
7710 7710 if (i_ap == NULL) {
7711 7711 unlocked_bytes += PAGESIZE;
7712 7712 anon_array_exit(&i_cookie);
7713 7713 continue;
7714 7714 }
7715 7715 swap_xlate(i_ap, &i_vp, &i_off);
7716 7716 anon_array_exit(&i_cookie);
7717 7717 pp = page_lookup(i_vp, i_off, SE_SHARED);
7718 7718 if (pp == NULL) {
7719 7719 unlocked_bytes += PAGESIZE;
7720 7720 continue;
7721 7721 } else if (pp->p_lckcnt == 0)
7722 7722 unlocked_bytes += PAGESIZE;
7723 7723 page_unlock(pp);
7724 7724 }
7725 7725 ANON_LOCK_EXIT(&amp->a_rwlock);
7726 7726 }
7727 7727
7728 7728 mutex_enter(&p->p_lock);
7729 7729 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7730 7730 chargeproc);
7731 7731 mutex_exit(&p->p_lock);
7732 7732
7733 7733 if (err) {
7734 7734 if (sp != NULL)
7735 7735 mutex_exit(&sp->shm_mlock);
7736 7736 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7737 7737 return (err);
7738 7738 }
7739 7739 }
7740 7740 /*
7741 7741 * Loop over all pages in the range. Process if we're locking and
7742 7742 * page has not already been locked in this mapping; or if we're
7743 7743 * unlocking and the page has been locked.
7744 7744 */
7745 7745 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7746 7746 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7747 7747 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7748 7748 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7749 7749 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7750 7750
7751 7751 if (amp != NULL)
7752 7752 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7753 7753 /*
7754 7754 * If this isn't a MAP_NORESERVE segment and
7755 7755 * we're locking, allocate anon slots if they
7756 7756 * don't exist. The page is brought in later on.
7757 7757 */
7758 7758 if (op == MC_LOCK && svd->vp == NULL &&
7759 7759 ((svd->flags & MAP_NORESERVE) == 0) &&
7760 7760 amp != NULL &&
7761 7761 ((ap = anon_get_ptr(amp->ahp, anon_index))
7762 7762 == NULL)) {
7763 7763 anon_array_enter(amp, anon_index, &cookie);
7764 7764
7765 7765 if ((ap = anon_get_ptr(amp->ahp,
7766 7766 anon_index)) == NULL) {
7767 7767 pp = anon_zero(seg, addr, &ap,
7768 7768 svd->cred);
7769 7769 if (pp == NULL) {
7770 7770 anon_array_exit(&cookie);
7771 7771 ANON_LOCK_EXIT(&amp->a_rwlock);
7772 7772 err = ENOMEM;
7773 7773 goto out;
7774 7774 }
7775 7775 ASSERT(anon_get_ptr(amp->ahp,
7776 7776 anon_index) == NULL);
7777 7777 (void) anon_set_ptr(amp->ahp,
7778 7778 anon_index, ap, ANON_SLEEP);
7779 7779 page_unlock(pp);
7780 7780 }
7781 7781 anon_array_exit(&cookie);
7782 7782 }
7783 7783
7784 7784 /*
7785 7785 * Get name for page, accounting for
7786 7786 * existence of private copy.
7787 7787 */
7788 7788 ap = NULL;
7789 7789 if (amp != NULL) {
7790 7790 anon_array_enter(amp, anon_index, &cookie);
7791 7791 ap = anon_get_ptr(amp->ahp, anon_index);
7792 7792 if (ap != NULL) {
7793 7793 swap_xlate(ap, &vp, &off);
7794 7794 } else {
7795 7795 if (svd->vp == NULL &&
7796 7796 (svd->flags & MAP_NORESERVE)) {
7797 7797 anon_array_exit(&cookie);
7798 7798 ANON_LOCK_EXIT(&amp->a_rwlock);
7799 7799 continue;
7800 7800 }
7801 7801 vp = svd->vp;
7802 7802 off = offset;
7803 7803 }
7804 7804 if (op != MC_LOCK || ap == NULL) {
7805 7805 anon_array_exit(&cookie);
7806 7806 ANON_LOCK_EXIT(&amp->a_rwlock);
7807 7807 }
7808 7808 } else {
7809 7809 vp = svd->vp;
7810 7810 off = offset;
7811 7811 }
7812 7812
7813 7813 /*
7814 7814 * Get page frame. It's ok if the page is
7815 7815 * not available when we're unlocking, as this
7816 7816 * may simply mean that a page we locked got
7817 7817 * truncated out of existence after we locked it.
7818 7818 *
7819 7819 * Invoke VOP_GETPAGE() to obtain the page struct
7820 7820 * since we may need to read it from disk if its
7821 7821 * been paged out.
7822 7822 */
7823 7823 if (op != MC_LOCK)
7824 7824 pp = page_lookup(vp, off, SE_SHARED);
7825 7825 else {
7826 7826 page_t *pl[1 + 1];
7827 7827 int error;
7828 7828
7829 7829 ASSERT(vp != NULL);
7830 7830
7831 7831 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7832 7832 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7833 7833 S_OTHER, svd->cred, NULL);
7834 7834
7835 7835 if (error && ap != NULL) {
7836 7836 anon_array_exit(&cookie);
7837 7837 ANON_LOCK_EXIT(&amp->a_rwlock);
7838 7838 }
7839 7839
7840 7840 /*
7841 7841 * If the error is EDEADLK then we must bounce
7842 7842 * up and drop all vm subsystem locks and then
7843 7843 * retry the operation later
7844 7844 * This behavior is a temporary measure because
7845 7845 * ufs/sds logging is badly designed and will
7846 7846 * deadlock if we don't allow this bounce to
7847 7847 * happen. The real solution is to re-design
7848 7848 * the logging code to work properly. See bug
7849 7849 * 4125102 for details of the problem.
7850 7850 */
7851 7851 if (error == EDEADLK) {
7852 7852 err = error;
7853 7853 goto out;
7854 7854 }
7855 7855 /*
7856 7856 * Quit if we fail to fault in the page. Treat
7857 7857 * the failure as an error, unless the addr
7858 7858 * is mapped beyond the end of a file.
7859 7859 */
7860 7860 if (error && svd->vp) {
7861 7861 va.va_mask = AT_SIZE;
7862 7862 if (VOP_GETATTR(svd->vp, &va, 0,
7863 7863 svd->cred, NULL) != 0) {
7864 7864 err = EIO;
7865 7865 goto out;
7866 7866 }
7867 7867 if (btopr(va.va_size) >=
7868 7868 btopr(off + 1)) {
7869 7869 err = EIO;
7870 7870 goto out;
7871 7871 }
7872 7872 goto out;
7873 7873
7874 7874 } else if (error) {
7875 7875 err = EIO;
7876 7876 goto out;
7877 7877 }
7878 7878 pp = pl[0];
7879 7879 ASSERT(pp != NULL);
7880 7880 }
7881 7881
7882 7882 /*
7883 7883 * See Statement at the beginning of this routine.
7884 7884 *
7885 7885 * claim is always set if MAP_PRIVATE and PROT_WRITE
7886 7886 * irrespective of following factors:
7887 7887 *
7888 7888 * (1) anon slots are populated or not
7889 7889 * (2) cow is broken or not
7890 7890 * (3) refcnt on ap is 1 or greater than 1
7891 7891 *
7892 7892 * See 4140683 for details
7893 7893 */
7894 7894 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7895 7895 (svd->type == MAP_PRIVATE));
7896 7896
7897 7897 /*
7898 7898 * Perform page-level operation appropriate to
7899 7899 * operation. If locking, undo the SOFTLOCK
7900 7900 * performed to bring the page into memory
7901 7901 * after setting the lock. If unlocking,
7902 7902 * and no page was found, account for the claim
7903 7903 * separately.
7904 7904 */
7905 7905 if (op == MC_LOCK) {
7906 7906 int ret = 1; /* Assume success */
7907 7907
7908 7908 ASSERT(!VPP_ISPPLOCK(vpp));
7909 7909
7910 7910 ret = page_pp_lock(pp, claim, 0);
7911 7911 if (ap != NULL) {
7912 7912 if (ap->an_pvp != NULL) {
7913 7913 anon_swap_free(ap, pp);
7914 7914 }
7915 7915 anon_array_exit(&cookie);
7916 7916 ANON_LOCK_EXIT(&amp->a_rwlock);
7917 7917 }
7918 7918 if (ret == 0) {
7919 7919 /* locking page failed */
7920 7920 page_unlock(pp);
7921 7921 err = EAGAIN;
7922 7922 goto out;
7923 7923 }
7924 7924 VPP_SETPPLOCK(vpp);
7925 7925 if (sp != NULL) {
7926 7926 if (pp->p_lckcnt == 1)
7927 7927 locked_bytes += PAGESIZE;
7928 7928 } else
7929 7929 locked_bytes += PAGESIZE;
7930 7930
7931 7931 if (lockmap != (ulong_t *)NULL)
7932 7932 BT_SET(lockmap, pos);
7933 7933
7934 7934 page_unlock(pp);
7935 7935 } else {
7936 7936 ASSERT(VPP_ISPPLOCK(vpp));
7937 7937 if (pp != NULL) {
7938 7938 /* sysV pages should be locked */
7939 7939 ASSERT(sp == NULL || pp->p_lckcnt > 0);
7940 7940 page_pp_unlock(pp, claim, 0);
7941 7941 if (sp != NULL) {
7942 7942 if (pp->p_lckcnt == 0)
7943 7943 unlocked_bytes
7944 7944 += PAGESIZE;
7945 7945 } else
7946 7946 unlocked_bytes += PAGESIZE;
7947 7947 page_unlock(pp);
7948 7948 } else {
7949 7949 ASSERT(sp == NULL);
7950 7950 unlocked_bytes += PAGESIZE;
7951 7951 }
7952 7952 VPP_CLRPPLOCK(vpp);
7953 7953 }
7954 7954 }
7955 7955 }
7956 7956 out:
7957 7957 if (op == MC_LOCK) {
7958 7958 /* Credit back bytes that did not get locked */
7959 7959 if ((unlocked_bytes - locked_bytes) > 0) {
7960 7960 if (proj == NULL)
7961 7961 mutex_enter(&p->p_lock);
7962 7962 rctl_decr_locked_mem(p, proj,
7963 7963 (unlocked_bytes - locked_bytes), chargeproc);
7964 7964 if (proj == NULL)
7965 7965 mutex_exit(&p->p_lock);
7966 7966 }
7967 7967
7968 7968 } else {
7969 7969 /* Account bytes that were unlocked */
7970 7970 if (unlocked_bytes > 0) {
7971 7971 if (proj == NULL)
7972 7972 mutex_enter(&p->p_lock);
7973 7973 rctl_decr_locked_mem(p, proj, unlocked_bytes,
7974 7974 chargeproc);
7975 7975 if (proj == NULL)
7976 7976 mutex_exit(&p->p_lock);
7977 7977 }
7978 7978 }
7979 7979 if (sp != NULL)
7980 7980 mutex_exit(&sp->shm_mlock);
7981 7981 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7982 7982
7983 7983 return (err);
7984 7984 }
7985 7985
7986 7986 /*
7987 7987 * Set advice from user for specified pages
7988 7988 * There are 9 types of advice:
7989 7989 * MADV_NORMAL - Normal (default) behavior (whatever that is)
7990 7990 * MADV_RANDOM - Random page references
7991 7991 * do not allow readahead or 'klustering'
7992 7992 * MADV_SEQUENTIAL - Sequential page references
7993 7993 * Pages previous to the one currently being
7994 7994 * accessed (determined by fault) are 'not needed'
7995 7995 * and are freed immediately
7996 7996 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
7997 7997 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
7998 7998 * MADV_FREE - Contents can be discarded
7999 7999 * MADV_ACCESS_DEFAULT- Default access
8000 8000 * MADV_ACCESS_LWP - Next LWP will access heavily
8001 8001 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
8002 8002 */
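
These advice values reach this routine from madvise(3C) (memcntl(2) with MC_ADVISE). A minimal userland sketch (the file and length are arbitrary):

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
    	size_t len = 4096;
    	int fd = open("/etc/passwd", O_RDONLY);
    	char *p;

    	if (fd < 0)
    		return (1);
    	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
    	if (p == MAP_FAILED)
    		return (1);
    	/* Tell segvn the mapping will be read sequentially. */
    	if (madvise(p, len, MADV_SEQUENTIAL) != 0)
    		perror("madvise");
    	/* ... read through the mapping ... */
    	(void) munmap(p, len);
    	(void) close(fd);
    	return (0);
    }
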
8003 8003 static int
8004 8004 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8005 8005 {
8006 8006 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8007 8007 size_t page;
8008 8008 int err = 0;
8009 8009 int already_set;
8010 8010 struct anon_map *amp;
8011 8011 ulong_t anon_index;
8012 8012 struct seg *next;
8013 8013 lgrp_mem_policy_t policy;
8014 8014 struct seg *prev;
8015 8015 struct vnode *vp;
8016 8016
8017 8017 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8018 8018
8019 8019 /*
8020 8020 * In case of MADV_FREE, we won't be modifying any segment private
8021 8021 * data structures; so, we only need to grab READER's lock
8022 8022 */
8023 8023 if (behav != MADV_FREE) {
8024 8024 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8025 8025 if (svd->tr_state != SEGVN_TR_OFF) {
8026 8026 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8027 8027 return (0);
8028 8028 }
8029 8029 } else {
8030 8030 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8031 8031 }
8032 8032
8033 8033 /*
8034 8034 * Large pages are assumed to be only turned on when accesses to the
8035 8035 * segment's address range have spatial and temporal locality. That
8036 8036 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8037 8037 * Also, ignore advice affecting lgroup memory allocation
8038 8038 * if we don't need to do lgroup optimizations on this system
8039 8039 */
8040 8040
8041 8041 if ((behav == MADV_SEQUENTIAL &&
8042 8042 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
8043 8043 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
8044 8044 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
8045 8045 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8046 8046 return (0);
8047 8047 }
8048 8048
8049 8049 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
8050 8050 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
8051 8051 /*
8052 8052 * Since we are going to unload hat mappings
8053 8053 * we first have to flush the cache. Otherwise
8054 8054 * this might lead to system panic if another
8055 8055 * thread is doing physio on the range whose
8056 8056 * mappings are unloaded by madvise(3C).
8057 8057 */
8058 8058 if (svd->softlockcnt > 0) {
8059 8059 /*
8060 8060 * If this is a shared segment, a non-zero softlockcnt
8061 8061 * means locked pages are still in use.
8062 8062 */
8063 8063 if (svd->type == MAP_SHARED) {
8064 8064 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8065 8065 return (EAGAIN);
8066 8066 }
8067 8067 /*
8068 8068 * Since we do have the segvn writers lock
8069 8069 * nobody can fill the cache with entries
8070 8070 * belonging to this seg during the purge.
8071 8071 * The flush either succeeds or we still
8072 8072 * have pending I/Os. In the latter case,
8073 8073 * madvise(3C) fails.
8074 8074 */
8075 8075 segvn_purge(seg);
8076 8076 if (svd->softlockcnt > 0) {
8077 8077 /*
8078 8078 * Since madvise(3C) is advisory and
8079 8079 * it's not part of UNIX98, madvise(3C)
8080 8080 * failure here doesn't cause any hardship.
8081 8081 * Note that we don't block in "as" layer.
8082 8082 */
8083 8083 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8084 8084 return (EAGAIN);
8085 8085 }
8086 8086 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
8087 8087 svd->amp->a_softlockcnt > 0) {
8088 8088 /*
8089 8089 * Try to purge this amp's entries from pcache. It
8090 8090 * will succeed only if other segments that share the
8091 8091 * amp have no outstanding softlock's.
8092 8092 */
8093 8093 segvn_purge(seg);
8094 8094 }
8095 8095 }
8096 8096
8097 8097 amp = svd->amp;
8098 8098 vp = svd->vp;
8099 8099 if (behav == MADV_FREE) {
8100 8100 /*
8101 8101 * MADV_FREE is not supported for segments with
8102 8102 * underlying object; if anonmap is NULL, anon slots
8103 8103 * are not yet populated and there is nothing for
8104 8104 * us to do. As MADV_FREE is advisory, we don't
8105 8105 * return error in either case.
8106 8106 */
8107 8107 if (vp != NULL || amp == NULL) {
8108 8108 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8109 8109 return (0);
8110 8110 }
8111 8111
8112 8112 segvn_purge(seg);
8113 8113
8114 8114 page = seg_page(seg, addr);
8115 8115 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8116 8116 anon_disclaim(amp, svd->anon_index + page, len);
8117 8117 ANON_LOCK_EXIT(&amp->a_rwlock);
8118 8118 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8119 8119 return (0);
8120 8120 }
8121 8121
8122 8122 /*
8123 8123 * If advice is to be applied to entire segment,
8124 8124 * use advice field in seg_data structure
8125 8125 * otherwise use appropriate vpage entry.
8126 8126 */
8127 8127 if ((addr == seg->s_base) && (len == seg->s_size)) {
8128 8128 switch (behav) {
8129 8129 case MADV_ACCESS_LWP:
8130 8130 case MADV_ACCESS_MANY:
8131 8131 case MADV_ACCESS_DEFAULT:
8132 8132 /*
8133 8133 * Set memory allocation policy for this segment
8134 8134 */
8135 8135 policy = lgrp_madv_to_policy(behav, len, svd->type);
8136 8136 if (svd->type == MAP_SHARED)
8137 8137 already_set = lgrp_shm_policy_set(policy, amp,
8138 8138 svd->anon_index, vp, svd->offset, len);
8139 8139 else {
8140 8140 /*
8141 8141 * For private memory, need writers lock on
8142 8142 * address space because the segment may be
8143 8143 * split or concatenated when changing policy
8144 8144 */
8145 8145 if (AS_READ_HELD(seg->s_as,
8146 8146 &seg->s_as->a_lock)) {
8147 8147 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8148 8148 return (IE_RETRY);
8149 8149 }
8150 8150
8151 8151 already_set = lgrp_privm_policy_set(policy,
8152 8152 &svd->policy_info, len);
8153 8153 }
8154 8154
8155 8155 /*
8156 8156 * If policy set already and it shouldn't be reapplied,
8157 8157 * don't do anything.
8158 8158 */
8159 8159 if (already_set &&
8160 8160 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8161 8161 break;
8162 8162
8163 8163 /*
8164 8164 * Mark any existing pages in given range for
8165 8165 * migration
8166 8166 */
8167 8167 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8168 8168 vp, svd->offset, 1);
8169 8169
8170 8170 /*
8171 8171 * If same policy set already or this is a shared
8172 8172 * memory segment, don't need to try to concatenate
8173 8173 * segment with adjacent ones.
8174 8174 */
8175 8175 if (already_set || svd->type == MAP_SHARED)
8176 8176 break;
8177 8177
8178 8178 /*
8179 8179 * Try to concatenate this segment with previous
8180 8180 * one and next one, since we changed policy for
8181 8181 * this one and it may be compatible with adjacent
8182 8182 * ones now.
8183 8183 */
8184 8184 prev = AS_SEGPREV(seg->s_as, seg);
8185 8185 next = AS_SEGNEXT(seg->s_as, seg);
8186 8186
8187 8187 if (next && next->s_ops == &segvn_ops &&
8188 8188 addr + len == next->s_base)
8189 8189 (void) segvn_concat(seg, next, 1);
8190 8190
8191 8191 if (prev && prev->s_ops == &segvn_ops &&
8192 8192 addr == prev->s_base + prev->s_size) {
8193 8193 /*
8194 8194 * Drop lock for private data of current
8195 8195 * segment before concatenating (deleting) it
8196 8196 * and return IE_REATTACH to tell as_ctl() that
8197 8197 * current segment has changed
8198 8198 */
8199 8199 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8200 8200 if (!segvn_concat(prev, seg, 1))
8201 8201 err = IE_REATTACH;
8202 8202
8203 8203 return (err);
8204 8204 }
8205 8205 break;
8206 8206
8207 8207 case MADV_SEQUENTIAL:
8208 8208 /*
8209 8209 * unloading mapping guarantees
8210 8210 * detection in segvn_fault
8211 8211 */
8212 8212 ASSERT(seg->s_szc == 0);
8213 8213 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8214 8214 hat_unload(seg->s_as->a_hat, addr, len,
8215 8215 HAT_UNLOAD);
8216 8216 /* FALLTHROUGH */
8217 8217 case MADV_NORMAL:
8218 8218 case MADV_RANDOM:
8219 8219 svd->advice = (uchar_t)behav;
8220 8220 svd->pageadvice = 0;
8221 8221 break;
8222 8222 case MADV_WILLNEED: /* handled in memcntl */
8223 8223 case MADV_DONTNEED: /* handled in memcntl */
8224 8224 case MADV_FREE: /* handled above */
8225 8225 break;
8226 8226 default:
8227 8227 err = EINVAL;
8228 8228 }
8229 8229 } else {
8230 8230 caddr_t eaddr;
8231 8231 struct seg *new_seg;
8232 8232 struct segvn_data *new_svd;
8233 8233 u_offset_t off;
8234 8234 caddr_t oldeaddr;
8235 8235
8236 8236 page = seg_page(seg, addr);
8237 8237
8238 8238 segvn_vpage(seg);
8239 8239
8240 8240 switch (behav) {
8241 8241 struct vpage *bvpp, *evpp;
8242 8242
8243 8243 case MADV_ACCESS_LWP:
8244 8244 case MADV_ACCESS_MANY:
8245 8245 case MADV_ACCESS_DEFAULT:
8246 8246 /*
8247 8247 * Set memory allocation policy for portion of this
8248 8248 * segment
8249 8249 */
8250 8250
8251 8251 /*
8252 8252 * Align address and length of advice to page
8253 8253 * boundaries for large pages
8254 8254 */
8255 8255 if (seg->s_szc != 0) {
8256 8256 size_t pgsz;
8257 8257
8258 8258 pgsz = page_get_pagesize(seg->s_szc);
8259 8259 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8260 8260 len = P2ROUNDUP(len, pgsz);
8261 8261 }
8262 8262
8263 8263 /*
8264 8264 * Check to see whether policy is set already
8265 8265 */
8266 8266 policy = lgrp_madv_to_policy(behav, len, svd->type);
8267 8267
8268 8268 anon_index = svd->anon_index + page;
8269 8269 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8270 8270
8271 8271 if (svd->type == MAP_SHARED)
8272 8272 already_set = lgrp_shm_policy_set(policy, amp,
8273 8273 anon_index, vp, off, len);
8274 8274 else
8275 8275 already_set =
8276 8276 (policy == svd->policy_info.mem_policy);
8277 8277
8278 8278 /*
8279 8279 * If policy set already and it shouldn't be reapplied,
8280 8280 * don't do anything.
8281 8281 */
8282 8282 if (already_set &&
8283 8283 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8284 8284 break;
8285 8285
8286 8286 /*
8287 8287 * For private memory, need writers lock on
8288 8288 * address space because the segment may be
8289 8289 * split or concatenated when changing policy
8290 8290 */
8291 8291 if (svd->type == MAP_PRIVATE &&
8292 8292 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8293 8293 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8294 8294 return (IE_RETRY);
8295 8295 }
8296 8296
8297 8297 /*
8298 8298 * Mark any existing pages in given range for
8299 8299 * migration
8300 8300 */
8301 8301 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8302 8302 vp, svd->offset, 1);
8303 8303
8304 8304 /*
8305 8305 * Don't need to try to split or concatenate
8306 8306 * segments, since policy is same or this is a shared
8307 8307 * memory segment
8308 8308 */
8309 8309 if (already_set || svd->type == MAP_SHARED)
8310 8310 break;
8311 8311
8312 8312 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8313 8313 ASSERT(svd->amp == NULL);
8314 8314 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8315 8315 ASSERT(svd->softlockcnt == 0);
8316 8316 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8317 8317 HAT_REGION_TEXT);
8318 8318 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8319 8319 }
8320 8320
8321 8321 /*
8322 8322 * Split off new segment if advice only applies to a
8323 8323 * portion of existing segment starting in middle
8324 8324 */
8325 8325 new_seg = NULL;
8326 8326 eaddr = addr + len;
8327 8327 oldeaddr = seg->s_base + seg->s_size;
8328 8328 if (addr > seg->s_base) {
8329 8329 /*
8330 8330 * Must flush I/O page cache
8331 8331 * before splitting segment
8332 8332 */
8333 8333 if (svd->softlockcnt > 0)
8334 8334 segvn_purge(seg);
8335 8335
8336 8336 /*
8337 8337 * Split segment and return IE_REATTACH to tell
8338 8338 * as_ctl() that current segment changed
8339 8339 */
8340 8340 new_seg = segvn_split_seg(seg, addr);
8341 8341 new_svd = (struct segvn_data *)new_seg->s_data;
8342 8342 err = IE_REATTACH;
8343 8343
8344 8344 /*
8345 8345 * If new segment ends where old one
8346 8346 * did, try to concatenate the new
8347 8347 * segment with next one.
8348 8348 */
8349 8349 if (eaddr == oldeaddr) {
8350 8350 /*
8351 8351 * Set policy for new segment
8352 8352 */
8353 8353 (void) lgrp_privm_policy_set(policy,
8354 8354 &new_svd->policy_info,
8355 8355 new_seg->s_size);
8356 8356
8357 8357 next = AS_SEGNEXT(new_seg->s_as,
8358 8358 new_seg);
8359 8359
8360 8360 if (next &&
8361 8361 next->s_ops == &segvn_ops &&
8362 8362 eaddr == next->s_base)
8363 8363 (void) segvn_concat(new_seg,
8364 8364 next, 1);
8365 8365 }
8366 8366 }
8367 8367
8368 8368 /*
8369 8369 * Split off end of existing segment if advice only
8370 8370 * applies to a portion of segment ending before
8371 8371 * end of the existing segment
8372 8372 */
8373 8373 if (eaddr < oldeaddr) {
8374 8374 /*
8375 8375 * Must flush I/O page cache
8376 8376 * before splitting segment
8377 8377 */
8378 8378 if (svd->softlockcnt > 0)
8379 8379 segvn_purge(seg);
8380 8380
8381 8381 /*
8382 8382 * If beginning of old segment was already
8383 8383 * split off, use new segment to split end off
8384 8384 * from.
8385 8385 */
8386 8386 if (new_seg != NULL && new_seg != seg) {
8387 8387 /*
8388 8388 * Split segment
8389 8389 */
8390 8390 (void) segvn_split_seg(new_seg, eaddr);
8391 8391
8392 8392 /*
8393 8393 * Set policy for new segment
8394 8394 */
8395 8395 (void) lgrp_privm_policy_set(policy,
8396 8396 &new_svd->policy_info,
8397 8397 new_seg->s_size);
8398 8398 } else {
8399 8399 /*
8400 8400 * Split segment and return IE_REATTACH
8401 8401 * to tell as_ctl() that current
8402 8402 * segment changed
8403 8403 */
8404 8404 (void) segvn_split_seg(seg, eaddr);
8405 8405 err = IE_REATTACH;
8406 8406
8407 8407 (void) lgrp_privm_policy_set(policy,
8408 8408 &svd->policy_info, seg->s_size);
8409 8409
8410 8410 /*
8411 8411 * If new segment starts where old one
8412 8412 * did, try to concatenate it with
8413 8413 * previous segment.
8414 8414 */
8415 8415 if (addr == seg->s_base) {
8416 8416 prev = AS_SEGPREV(seg->s_as,
8417 8417 seg);
8418 8418
8419 8419 /*
8420 8420 * Drop lock for private data
8421 8421 * of current segment before
8422 8422 * concatenating (deleting) it
8423 8423 */
8424 8424 if (prev &&
8425 8425 prev->s_ops ==
8426 8426 &segvn_ops &&
8427 8427 addr == prev->s_base +
8428 8428 prev->s_size) {
8429 8429 SEGVN_LOCK_EXIT(
8430 8430 seg->s_as,
8431 8431 &svd->lock);
8432 8432 (void) segvn_concat(
8433 8433 prev, seg, 1);
8434 8434 return (err);
8435 8435 }
8436 8436 }
8437 8437 }
8438 8438 }
8439 8439 break;
8440 8440 case MADV_SEQUENTIAL:
8441 8441 ASSERT(seg->s_szc == 0);
8442 8442 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8443 8443 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8444 8444 /* FALLTHROUGH */
8445 8445 case MADV_NORMAL:
8446 8446 case MADV_RANDOM:
8447 8447 bvpp = &svd->vpage[page];
8448 8448 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8449 8449 for (; bvpp < evpp; bvpp++)
8450 8450 VPP_SETADVICE(bvpp, behav);
8451 8451 svd->advice = MADV_NORMAL;
8452 8452 break;
8453 8453 case MADV_WILLNEED: /* handled in memcntl */
8454 8454 case MADV_DONTNEED: /* handled in memcntl */
8455 8455 case MADV_FREE: /* handled above */
8456 8456 break;
8457 8457 default:
8458 8458 err = EINVAL;
8459 8459 }
8460 8460 }
8461 8461 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8462 8462 return (err);
8463 8463 }
8464 8464
8465 8465 /*
8466 8466 * Create a vpage structure for this seg.
8467 8467 */
8468 8468 static void
8469 8469 segvn_vpage(struct seg *seg)
8470 8470 {
8471 8471 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8472 8472 struct vpage *vp, *evp;
8473 8473
8474 8474 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8475 8475
8476 8476 /*
8477 8477 * If no vpage structure exists, allocate one. Copy the protections
8478 8478 * and the advice from the segment itself to the individual pages.
8479 8479 */
8480 8480 if (svd->vpage == NULL) {
8481 8481 svd->pageadvice = 1;
8482 8482 svd->vpage = kmem_zalloc(seg_pages(seg) * sizeof (struct vpage),
8483 8483 KM_SLEEP);
8484 8484 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8485 8485 for (vp = svd->vpage; vp < evp; vp++) {
8486 8486 VPP_SETPROT(vp, svd->prot);
8487 8487 VPP_SETADVICE(vp, svd->advice);
8488 8488 }
8489 8489 }
8490 8490 }
8491 8491
8492 8492 /*
8493 8493 * Dump the pages belonging to this segvn segment.
8494 8494 */
8495 8495 static void
8496 8496 segvn_dump(struct seg *seg)
8497 8497 {
8498 8498 struct segvn_data *svd;
8499 8499 page_t *pp;
8500 8500 struct anon_map *amp;
8501 8501 ulong_t anon_index;
8502 8502 struct vnode *vp;
8503 8503 u_offset_t off, offset;
8504 8504 pfn_t pfn;
8505 8505 pgcnt_t page, npages;
8506 8506 caddr_t addr;
8507 8507
8508 8508 npages = seg_pages(seg);
8509 8509 svd = (struct segvn_data *)seg->s_data;
8510 8510 vp = svd->vp;
8511 8511 off = offset = svd->offset;
8512 8512 addr = seg->s_base;
8513 8513
8514 8514 if ((amp = svd->amp) != NULL) {
8515 8515 anon_index = svd->anon_index;
8516 8516 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8517 8517 }
8518 8518
8519 8519 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8520 8520 struct anon *ap;
8521 8521 int we_own_it = 0;
8522 8522
8523 8523 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8524 8524 swap_xlate_nopanic(ap, &vp, &off);
8525 8525 } else {
8526 8526 vp = svd->vp;
8527 8527 off = offset;
8528 8528 }
8529 8529
8530 8530 /*
8531 8531 * If pp == NULL, the page either does not exist
8532 8532 * or is exclusively locked. So determine if it
8533 8533 * exists before searching for it.
8534 8534 */
8535 8535
8536 8536 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8537 8537 we_own_it = 1;
8538 8538 else
8539 8539 pp = page_exists(vp, off);
8540 8540
8541 8541 if (pp) {
8542 8542 pfn = page_pptonum(pp);
8543 8543 dump_addpage(seg->s_as, addr, pfn);
8544 8544 if (we_own_it)
8545 8545 page_unlock(pp);
8546 8546 }
8547 8547 addr += PAGESIZE;
8548 8548 dump_timeleft = dump_timeout;
8549 8549 }
8550 8550
8551 8551 if (amp != NULL)
8552 8552 		ANON_LOCK_EXIT(&amp->a_rwlock);
8553 8553 }
8554 8554
8555 8555 #ifdef DEBUG
8556 8556 static uint32_t segvn_pglock_mtbf = 0;
8557 8557 #endif
8558 8558
8559 8559 #define PCACHE_SHWLIST ((page_t *)-2)
8560 8560 #define NOPCACHE_SHWLIST ((page_t *)-1)
8561 8561
8562 8562 /*
8563 8563 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8564 8564 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8565 8565 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8566 8566 * the same parts of the segment. Currently shadow list creation is only
8567 8567 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8568 8568 * tagged with segment pointer, starting virtual address and length. This
8569 8569 * approach for MAP_SHARED segments may add many pcache entries for the same
8570 8570 * set of pages and lead to long hash chains that decrease pcache lookup
8571 8571 * performance. To avoid this issue for shared segments shared anon map and
8572 8572 * starting anon index are used for pcache entry tagging. This allows all
8573 8573 * segments to share pcache entries for the same anon range and reduces pcache
8574 8574 * chain's length as well as memory overhead from duplicate shadow lists and
8575 8575 * pcache entries.
8576 8576 *
8577 8577 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8578 8578 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8579 8579 * part of softlockcnt accounting is done differently for private and shared
8580 8580 * segments. In private segment case softlock is only incremented when a new
8581 8581 * shadow list is created but not when an existing one is found via
8582 8582 * seg_plookup(). pcache entries have reference count incremented/decremented
8583 8583 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8584 8584 * reference count can be purged (and purging is needed before segment can be
8585 8585 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8586 8586 * decrement softlockcnt. Since in private segment case each of its pcache
8587 8587 * entries only belongs to this segment we can expect that when
8588 8588 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8589 8589 * segment purge will succeed and softlockcnt will drop to 0. In shared
8590 8590 * segment case reference count in pcache entry counts active locks from many
8591 8591 * different segments so we can't expect segment purging to succeed even when
8592 8592 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8593 8593 * segment. To be able to determine when there're no pending pagelocks in
8594 8594 * shared segment case we don't rely on purging to make softlockcnt drop to 0
8595 8595 * but instead softlockcnt is incremented and decremented for every
8596 8596 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
8597 8597 * list was created or an existing one was found. When softlockcnt drops to 0
8598 8598 * this segment no longer has any claims for pcached shadow lists and the
8599 8599 * segment can be freed even if there're still active pcache entries
8600 8600 * shared by this segment anon map. Shared segment pcache entries belong to
8601 8601 * anon map and are typically removed when anon map is freed after all
8602 8602 * processes destroy the segments that use this anon map.
8603 8603 */
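A minimal sketch of the tagging scheme described above, using the same fields
the function below consults when it picks a pcache tag; the helper and struct
names here are illustrative only and are not part of this file:

	/* Illustrative only: how a pcache tag is chosen for a segvn segment. */
	struct pcache_tag_sketch {
		void	*ptag;	/* anon map for MAP_SHARED, seg for MAP_PRIVATE */
		caddr_t	paddr;	/* anon-index based for shared, vaddr for private */
	};

	static void
	pcache_pick_tag(struct seg *seg, struct segvn_data *svd,
	    caddr_t lpgaddr, struct pcache_tag_sketch *tp)
	{
		if (svd->type == MAP_SHARED) {
			/* shared: tag by amp + starting anon index */
			tp->ptag = (void *)svd->amp;
			tp->paddr = (caddr_t)((lpgaddr - seg->s_base) +
			    ptob(svd->anon_index));
		} else {
			/* private: tag by this segment + virtual address */
			tp->ptag = (void *)seg;
			tp->paddr = lpgaddr;
		}
	}

The payoff of tagging shared mappings by amp and anon index is that every
process mapping the same anon range collapses onto one pcache entry instead
of adding one entry per segment to the hash chain.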
8604 8604 static int
8605 8605 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8606 8606 enum lock_type type, enum seg_rw rw)
8607 8607 {
8608 8608 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8609 8609 size_t np;
8610 8610 pgcnt_t adjustpages;
8611 8611 pgcnt_t npages;
8612 8612 ulong_t anon_index;
8613 8613 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8614 8614 uint_t error;
8615 8615 struct anon_map *amp;
8616 8616 pgcnt_t anpgcnt;
8617 8617 struct page **pplist, **pl, *pp;
8618 8618 caddr_t a;
8619 8619 size_t page;
8620 8620 caddr_t lpgaddr, lpgeaddr;
8621 8621 anon_sync_obj_t cookie;
8622 8622 int anlock;
8623 8623 struct anon_map *pamp;
8624 8624 caddr_t paddr;
8625 8625 seg_preclaim_cbfunc_t preclaim_callback;
8626 8626 size_t pgsz;
8627 8627 int use_pcache;
8628 8628 size_t wlen;
8629 8629 uint_t pflags = 0;
8630 8630 int sftlck_sbase = 0;
8631 8631 int sftlck_send = 0;
8632 8632
8633 8633 #ifdef DEBUG
8634 8634 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8635 8635 hrtime_t ts = gethrtime();
8636 8636 if ((ts % segvn_pglock_mtbf) == 0) {
8637 8637 return (ENOTSUP);
8638 8638 }
8639 8639 if ((ts % segvn_pglock_mtbf) == 1) {
8640 8640 return (EFAULT);
8641 8641 }
8642 8642 }
8643 8643 #endif
8644 8644
8645 8645 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8646 8646 "segvn_pagelock: start seg %p addr %p", seg, addr);
8647 8647
8648 8648 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8649 8649 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8650 8650
8651 8651 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8652 8652
8653 8653 /*
8654 8654 * for now we only support pagelock to anon memory. We would have to
8655 8655 * check protections for vnode objects and call into the vnode driver.
8656 8656 * That's too much for a fast path. Let the fault entry point handle
8657 8657 * it.
8658 8658 */
8659 8659 if (svd->vp != NULL) {
8660 8660 if (type == L_PAGELOCK) {
8661 8661 error = ENOTSUP;
8662 8662 goto out;
8663 8663 }
8664 8664 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8665 8665 }
8666 8666 if ((amp = svd->amp) == NULL) {
8667 8667 if (type == L_PAGELOCK) {
8668 8668 error = EFAULT;
8669 8669 goto out;
8670 8670 }
8671 8671 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8672 8672 }
8673 8673 if (rw != S_READ && rw != S_WRITE) {
8674 8674 if (type == L_PAGELOCK) {
8675 8675 error = ENOTSUP;
8676 8676 goto out;
8677 8677 }
8678 8678 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8679 8679 }
8680 8680
8681 8681 if (seg->s_szc != 0) {
8682 8682 /*
8683 8683 * We are adjusting the pagelock region to the large page size
8684 8684 * boundary because the unlocked part of a large page cannot
8685 8685 * be freed anyway unless all constituent pages of a large
8686 8686 * page are locked. Bigger regions reduce pcache chain length
8687 8687 * and improve lookup performance. The tradeoff is that the
8688 8688 * very first segvn_pagelock() call for a given page is more
8689 8689 * expensive if only 1 page_t is needed for IO. This is only
8690 8690 * an issue if pcache entry doesn't get reused by several
8691 8691 * subsequent calls. We optimize here for the case when pcache
8692 8692 * is heavily used by repeated IOs to the same address range.
8693 8693 *
8694 8694 * Note segment's page size cannot change while we are holding
8695 8695 * as lock. And then it cannot change while softlockcnt is
8696 8696 * not 0. This will allow us to correctly recalculate large
8697 8697 * page size region for the matching pageunlock/reclaim call
8698 8698 * since as_pageunlock() caller must always match
8699 8699 * as_pagelock() call's addr and len.
8700 8700 *
8701 8701 * For pageunlock *ppp points to the pointer of page_t that
8702 8702 * corresponds to the real unadjusted start address. Similar
8703 8703 * for pagelock *ppp must point to the pointer of page_t that
8704 8704 * corresponds to the real unadjusted start address.
8705 8705 */
8706 8706 pgsz = page_get_pagesize(seg->s_szc);
8707 8707 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8708 8708 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8709 8709 } else if (len < segvn_pglock_comb_thrshld) {
8710 8710 lpgaddr = addr;
8711 8711 lpgeaddr = addr + len;
8712 8712 adjustpages = 0;
8713 8713 pgsz = PAGESIZE;
8714 8714 } else {
8715 8715 /*
8716 8716 * Align the address range of large enough requests to allow
8717 8717 * combining of different shadow lists into 1 to reduce memory
8718 8718 * overhead from potentially overlapping large shadow lists
8719 8719 * (worst case is we have a 1MB IO into buffers with start
8720 8720 * addresses separated by 4K). Alignment is only possible if
8721 8721 * padded chunks have sufficient access permissions. Note
8722 8722 * permissions won't change between L_PAGELOCK and
8723 8723 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8724 8724 * segvn_setprot() to wait until softlockcnt drops to 0. This
8725 8725 * allows us to determine in L_PAGEUNLOCK the same range we
8726 8726 * computed in L_PAGELOCK.
8727 8727 *
8728 8728 * If alignment is limited by segment ends set
8729 8729 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8730 8730 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8731 8731 * per segment counters. In L_PAGEUNLOCK case decrease
8732 8732 * softlockcnt_sbase/softlockcnt_send counters if
8733 8733 * sftlck_sbase/sftlck_send flags are set. When
8734 8734 * softlockcnt_sbase/softlockcnt_send are non 0
8735 8735 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8736 8736 * won't merge the segments. This restriction combined with
8737 8737 * restriction on segment unmapping and splitting for segments
8738 8738 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8739 8739 * correctly determine the same range that was previously
8740 8740 * locked by matching L_PAGELOCK.
8741 8741 */
8742 8742 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8743 8743 pgsz = PAGESIZE;
8744 8744 if (svd->type == MAP_PRIVATE) {
8745 8745 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8746 8746 segvn_pglock_comb_balign);
8747 8747 if (lpgaddr < seg->s_base) {
8748 8748 lpgaddr = seg->s_base;
8749 8749 sftlck_sbase = 1;
8750 8750 }
8751 8751 } else {
8752 8752 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8753 8753 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8754 8754 if (aaix < svd->anon_index) {
8755 8755 lpgaddr = seg->s_base;
8756 8756 sftlck_sbase = 1;
8757 8757 } else {
8758 8758 lpgaddr = addr - ptob(aix - aaix);
8759 8759 ASSERT(lpgaddr >= seg->s_base);
8760 8760 }
8761 8761 }
8762 8762 if (svd->pageprot && lpgaddr != addr) {
8763 8763 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8764 8764 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8765 8765 while (vp < evp) {
8766 8766 if ((VPP_PROT(vp) & protchk) == 0) {
8767 8767 break;
8768 8768 }
8769 8769 vp++;
8770 8770 }
8771 8771 if (vp < evp) {
8772 8772 lpgaddr = addr;
8773 8773 pflags = 0;
8774 8774 }
8775 8775 }
8776 8776 lpgeaddr = addr + len;
8777 8777 if (pflags) {
8778 8778 if (svd->type == MAP_PRIVATE) {
8779 8779 lpgeaddr = (caddr_t)P2ROUNDUP(
8780 8780 (uintptr_t)lpgeaddr,
8781 8781 segvn_pglock_comb_balign);
8782 8782 } else {
8783 8783 ulong_t aix = svd->anon_index +
8784 8784 seg_page(seg, lpgeaddr);
8785 8785 ulong_t aaix = P2ROUNDUP(aix,
8786 8786 segvn_pglock_comb_palign);
8787 8787 if (aaix < aix) {
8788 8788 lpgeaddr = 0;
8789 8789 } else {
8790 8790 lpgeaddr += ptob(aaix - aix);
8791 8791 }
8792 8792 }
8793 8793 if (lpgeaddr == 0 ||
8794 8794 lpgeaddr > seg->s_base + seg->s_size) {
8795 8795 lpgeaddr = seg->s_base + seg->s_size;
8796 8796 sftlck_send = 1;
8797 8797 }
8798 8798 }
8799 8799 if (svd->pageprot && lpgeaddr != addr + len) {
8800 8800 struct vpage *vp;
8801 8801 struct vpage *evp;
8802 8802
8803 8803 vp = &svd->vpage[seg_page(seg, addr + len)];
8804 8804 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8805 8805
8806 8806 while (vp < evp) {
8807 8807 if ((VPP_PROT(vp) & protchk) == 0) {
8808 8808 break;
8809 8809 }
8810 8810 vp++;
8811 8811 }
8812 8812 if (vp < evp) {
8813 8813 lpgeaddr = addr + len;
8814 8814 }
8815 8815 }
8816 8816 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8817 8817 }
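	/*
	 * Worked example of the alignment arithmetic above for the
	 * MAP_PRIVATE combining case (values are illustrative only; the real
	 * alignment is segvn_pglock_comb_balign and the page size is the
	 * platform's):
	 *
	 *	addr = 0x12345000, len = 0x100000, balign = 1MB, PAGESIZE = 4K
	 *	lpgaddr     = P2ALIGN(0x12345000, 0x100000)   = 0x12300000
	 *	lpgeaddr    = P2ROUNDUP(0x12445000, 0x100000) = 0x12500000
	 *	adjustpages = btop(0x12345000 - 0x12300000)   = 0x45 pages
	 *
	 * The returned shadow list pointer is then *ppp = pplist + 0x45, so
	 * the caller still sees page_t pointers starting at its own addr.
	 */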
8818 8818
8819 8819 /*
8820 8820 * For MAP_SHARED segments we create pcache entries tagged by amp and
8821 8821 * anon index so that we can share pcache entries with other segments
8822 8822 * that map this amp. For private segments pcache entries are tagged
8823 8823 * with segment and virtual address.
8824 8824 */
8825 8825 if (svd->type == MAP_SHARED) {
8826 8826 pamp = amp;
8827 8827 paddr = (caddr_t)((lpgaddr - seg->s_base) +
8828 8828 ptob(svd->anon_index));
8829 8829 preclaim_callback = shamp_reclaim;
8830 8830 } else {
8831 8831 pamp = NULL;
8832 8832 paddr = lpgaddr;
8833 8833 preclaim_callback = segvn_reclaim;
8834 8834 }
8835 8835
8836 8836 if (type == L_PAGEUNLOCK) {
8837 8837 VM_STAT_ADD(segvnvmstats.pagelock[0]);
8838 8838
8839 8839 /*
8840 8840 * update hat ref bits for /proc. We need to make sure
8841 8841 * that threads tracing the ref and mod bits of the
8842 8842 * address space get the right data.
8843 8843 * Note: page ref and mod bits are updated at reclaim time
8844 8844 */
8845 8845 if (seg->s_as->a_vbits) {
8846 8846 for (a = addr; a < addr + len; a += PAGESIZE) {
8847 8847 if (rw == S_WRITE) {
8848 8848 hat_setstat(seg->s_as, a,
8849 8849 PAGESIZE, P_REF | P_MOD);
8850 8850 } else {
8851 8851 hat_setstat(seg->s_as, a,
8852 8852 PAGESIZE, P_REF);
8853 8853 }
8854 8854 }
8855 8855 }
8856 8856
8857 8857 /*
8858 8858 * Check the shadow list entry after the last page used in
8859 8859 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
8860 8860 * was not inserted into pcache and is not large page
8861 8861 * adjusted. In this case call reclaim callback directly and
8862 8862 * don't adjust the shadow list start and size for large
8863 8863 * pages.
8864 8864 */
8865 8865 npages = btop(len);
8866 8866 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
8867 8867 void *ptag;
8868 8868 if (pamp != NULL) {
8869 8869 ASSERT(svd->type == MAP_SHARED);
8870 8870 ptag = (void *)pamp;
8871 8871 paddr = (caddr_t)((addr - seg->s_base) +
8872 8872 ptob(svd->anon_index));
8873 8873 } else {
8874 8874 ptag = (void *)seg;
8875 8875 paddr = addr;
8876 8876 }
8877 8877 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
8878 8878 } else {
8879 8879 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
8880 8880 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
8881 8881 len = lpgeaddr - lpgaddr;
8882 8882 npages = btop(len);
8883 8883 seg_pinactive(seg, pamp, paddr, len,
8884 8884 *ppp - adjustpages, rw, pflags, preclaim_callback);
8885 8885 }
8886 8886
8887 8887 if (pamp != NULL) {
8888 8888 ASSERT(svd->type == MAP_SHARED);
8889 8889 ASSERT(svd->softlockcnt >= npages);
8890 8890 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
8891 8891 }
8892 8892
8893 8893 if (sftlck_sbase) {
8894 8894 ASSERT(svd->softlockcnt_sbase > 0);
8895 8895 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, -1);
8896 8896 }
8897 8897 if (sftlck_send) {
8898 8898 ASSERT(svd->softlockcnt_send > 0);
8899 8899 atomic_add_long((ulong_t *)&svd->softlockcnt_send, -1);
8900 8900 }
8901 8901
8902 8902 /*
8903 8903 * If someone is blocked while unmapping, we purge
8904 8904 * segment page cache and thus reclaim pplist synchronously
8905 8905 * without waiting for seg_pasync_thread. This speeds up
8906 8906 * unmapping in cases where munmap(2) is called, while
8907 8907 * raw async i/o is still in progress or where a thread
8908 8908 * exits on data fault in a multithreaded application.
8909 8909 */
8910 8910 if (AS_ISUNMAPWAIT(seg->s_as)) {
8911 8911 if (svd->softlockcnt == 0) {
8912 8912 mutex_enter(&seg->s_as->a_contents);
8913 8913 if (AS_ISUNMAPWAIT(seg->s_as)) {
8914 8914 AS_CLRUNMAPWAIT(seg->s_as);
8915 8915 cv_broadcast(&seg->s_as->a_cv);
8916 8916 }
8917 8917 mutex_exit(&seg->s_as->a_contents);
8918 8918 } else if (pamp == NULL) {
8919 8919 /*
8920 8920 * softlockcnt is not 0 and this is a
8921 8921 * MAP_PRIVATE segment. Try to purge its
8922 8922 * pcache entries to reduce softlockcnt.
8923 8923 * If it drops to 0 segvn_reclaim()
8924 8924 * will wake up a thread waiting on
8925 8925 * unmapwait flag.
8926 8926 *
8927 8927 * We don't purge MAP_SHARED segments with non
8928 8928 * 0 softlockcnt since IO is still in progress
8929 8929 * for such segments.
8930 8930 */
8931 8931 ASSERT(svd->type == MAP_PRIVATE);
8932 8932 segvn_purge(seg);
8933 8933 }
8934 8934 }
8935 8935 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8936 8936 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
8937 8937 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
8938 8938 return (0);
8939 8939 }
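	/*
	 * The unlock path above keys off the sentinel stored one slot past
	 * the last page pointer of the shadow list (set up below in the
	 * L_PAGELOCK path): NOPCACHE_SHWLIST means the list never went into
	 * pcache, so it is reclaimed directly with the caller's unadjusted
	 * addr/len; PCACHE_SHWLIST (or a swapfs page in an interior slot)
	 * means the large page / combine adjusted range must be recomputed
	 * before handing the list back via seg_pinactive().
	 */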
8940 8940
8941 8941 /* The L_PAGELOCK case ... */
8942 8942
8943 8943 VM_STAT_ADD(segvnvmstats.pagelock[1]);
8944 8944
8945 8945 /*
8946 8946 * For MAP_SHARED segments we have to check protections before
8947 8947 * seg_plookup() since pcache entries may be shared by many segments
8948 8948 * with potentially different page protections.
8949 8949 */
8950 8950 if (pamp != NULL) {
8951 8951 ASSERT(svd->type == MAP_SHARED);
8952 8952 if (svd->pageprot == 0) {
8953 8953 if ((svd->prot & protchk) == 0) {
8954 8954 error = EACCES;
8955 8955 goto out;
8956 8956 }
8957 8957 } else {
8958 8958 /*
8959 8959 * check page protections
8960 8960 */
8961 8961 caddr_t ea;
8962 8962
8963 8963 if (seg->s_szc) {
8964 8964 a = lpgaddr;
8965 8965 ea = lpgeaddr;
8966 8966 } else {
8967 8967 a = addr;
8968 8968 ea = addr + len;
8969 8969 }
8970 8970 for (; a < ea; a += pgsz) {
8971 8971 struct vpage *vp;
8972 8972
8973 8973 ASSERT(seg->s_szc == 0 ||
8974 8974 sameprot(seg, a, pgsz));
8975 8975 vp = &svd->vpage[seg_page(seg, a)];
8976 8976 if ((VPP_PROT(vp) & protchk) == 0) {
8977 8977 error = EACCES;
8978 8978 goto out;
8979 8979 }
8980 8980 }
8981 8981 }
8982 8982 }
8983 8983
8984 8984 /*
8985 8985 * try to find pages in segment page cache
8986 8986 */
8987 8987 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
8988 8988 if (pplist != NULL) {
8989 8989 if (pamp != NULL) {
8990 8990 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
8991 8991 ASSERT(svd->type == MAP_SHARED);
8992 8992 atomic_add_long((ulong_t *)&svd->softlockcnt,
8993 8993 npages);
8994 8994 }
8995 8995 if (sftlck_sbase) {
8996 8996 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
8997 8997 }
8998 8998 if (sftlck_send) {
8999 8999 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
9000 9000 }
9001 9001 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9002 9002 *ppp = pplist + adjustpages;
9003 9003 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
9004 9004 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
9005 9005 return (0);
9006 9006 }
9007 9007
9008 9008 /*
9009 9009 * For MAP_SHARED segments we already verified above that segment
9010 9010 * protections allow this pagelock operation.
9011 9011 */
9012 9012 if (pamp == NULL) {
9013 9013 ASSERT(svd->type == MAP_PRIVATE);
9014 9014 if (svd->pageprot == 0) {
9015 9015 if ((svd->prot & protchk) == 0) {
9016 9016 error = EACCES;
9017 9017 goto out;
9018 9018 }
9019 9019 if (svd->prot & PROT_WRITE) {
9020 9020 wlen = lpgeaddr - lpgaddr;
9021 9021 } else {
9022 9022 wlen = 0;
9023 9023 ASSERT(rw == S_READ);
9024 9024 }
9025 9025 } else {
9026 9026 int wcont = 1;
9027 9027 /*
9028 9028 * check page protections
9029 9029 */
9030 9030 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9031 9031 struct vpage *vp;
9032 9032
9033 9033 ASSERT(seg->s_szc == 0 ||
9034 9034 sameprot(seg, a, pgsz));
9035 9035 vp = &svd->vpage[seg_page(seg, a)];
9036 9036 if ((VPP_PROT(vp) & protchk) == 0) {
9037 9037 error = EACCES;
9038 9038 goto out;
9039 9039 }
9040 9040 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9041 9041 wlen += pgsz;
9042 9042 } else {
9043 9043 wcont = 0;
9044 9044 ASSERT(rw == S_READ);
9045 9045 }
9046 9046 }
9047 9047 }
9048 9048 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9049 9049 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9050 9050 }
9051 9051
9052 9052 /*
9053 9053 * Only build large page adjusted shadow list if we expect to insert
9054 9054 * it into pcache. For large enough pages it's a big overhead to
9055 9055 * create a shadow list of the entire large page. But this overhead
9056 9056 * should be amortized over repeated pcache hits on subsequent reuse
9057 9057 * of this shadow list (IO into any range within this shadow list will
9058 9058 * find it in pcache since we large page align the request for pcache
9059 9059 * lookups). pcache performance is improved with bigger shadow lists
9060 9060 * as it reduces the time to pcache the entire big segment and reduces
9061 9061 * pcache chain length.
9062 9062 */
9063 9063 if (seg_pinsert_check(seg, pamp, paddr,
9064 9064 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9065 9065 addr = lpgaddr;
9066 9066 len = lpgeaddr - lpgaddr;
9067 9067 use_pcache = 1;
9068 9068 } else {
9069 9069 use_pcache = 0;
9070 9070 /*
9071 9071 * Since this entry will not be inserted into the pcache, we
9072 9072 * will not do any adjustments to the starting address or
9073 9073 * size of the memory to be locked.
9074 9074 */
9075 9075 adjustpages = 0;
9076 9076 }
9077 9077 npages = btop(len);
9078 9078
9079 9079 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9080 9080 pl = pplist;
9081 9081 *ppp = pplist + adjustpages;
9082 9082 /*
9083 9083 * If use_pcache is 0 this shadow list is not large page adjusted.
9084 9084 * Record this info in the last entry of shadow array so that
9085 9085 * L_PAGEUNLOCK can determine if it should large page adjust the
9086 9086 * address range to find the real range that was locked.
9087 9087 */
9088 9088 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9089 9089
9090 9090 page = seg_page(seg, addr);
9091 9091 anon_index = svd->anon_index + page;
9092 9092
9093 9093 anlock = 0;
9094 9094 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9095 9095 ASSERT(amp->a_szc >= seg->s_szc);
9096 9096 anpgcnt = page_get_pagecnt(amp->a_szc);
9097 9097 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9098 9098 struct anon *ap;
9099 9099 struct vnode *vp;
9100 9100 u_offset_t off;
9101 9101
9102 9102 /*
9103 9103 * Lock and unlock anon array only once per large page.
9104 9104 * anon_array_enter() locks the root anon slot according to
9105 9105 * a_szc which can't change while anon map is locked. We lock
9106 9106 * anon the first time through this loop and each time we
9107 9107 * reach anon index that corresponds to a root of a large
9108 9108 * page.
9109 9109 */
9110 9110 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9111 9111 ASSERT(anlock == 0);
9112 9112 anon_array_enter(amp, anon_index, &cookie);
9113 9113 anlock = 1;
9114 9114 }
9115 9115 ap = anon_get_ptr(amp->ahp, anon_index);
9116 9116
9117 9117 /*
9118 9118 * We must never use seg_pcache for COW pages
9119 9119 * because we might end up with original page still
9120 9120 * lying in seg_pcache even after private page is
9121 9121 * created. This leads to data corruption as
9122 9122 * aio_write refers to the page still in cache
9123 9123 * while all other accesses refer to the private
9124 9124 * page.
9125 9125 */
9126 9126 if (ap == NULL || ap->an_refcnt != 1) {
9127 9127 struct vpage *vpage;
9128 9128
9129 9129 if (seg->s_szc) {
9130 9130 error = EFAULT;
9131 9131 break;
9132 9132 }
9133 9133 if (svd->vpage != NULL) {
9134 9134 vpage = &svd->vpage[seg_page(seg, a)];
9135 9135 } else {
9136 9136 vpage = NULL;
9137 9137 }
9138 9138 ASSERT(anlock);
9139 9139 anon_array_exit(&cookie);
9140 9140 anlock = 0;
9141 9141 pp = NULL;
9142 9142 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9143 9143 vpage, &pp, 0, F_INVAL, rw, 1);
9144 9144 if (error) {
9145 9145 error = fc_decode(error);
9146 9146 break;
9147 9147 }
9148 9148 anon_array_enter(amp, anon_index, &cookie);
9149 9149 anlock = 1;
9150 9150 ap = anon_get_ptr(amp->ahp, anon_index);
9151 9151 if (ap == NULL || ap->an_refcnt != 1) {
9152 9152 error = EFAULT;
9153 9153 break;
9154 9154 }
9155 9155 }
9156 9156 swap_xlate(ap, &vp, &off);
9157 9157 pp = page_lookup_nowait(vp, off, SE_SHARED);
9158 9158 if (pp == NULL) {
9159 9159 error = EFAULT;
9160 9160 break;
9161 9161 }
9162 9162 if (ap->an_pvp != NULL) {
9163 9163 anon_swap_free(ap, pp);
9164 9164 }
9165 9165 /*
9166 9166 * Unlock anon if this is the last slot in a large page.
9167 9167 */
9168 9168 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9169 9169 ASSERT(anlock);
9170 9170 anon_array_exit(&cookie);
9171 9171 anlock = 0;
9172 9172 }
9173 9173 *pplist++ = pp;
9174 9174 }
9175 9175 if (anlock) { /* Ensure the lock is dropped */
9176 9176 anon_array_exit(&cookie);
9177 9177 }
9178 9178 	ANON_LOCK_EXIT(&amp->a_rwlock);
9179 9179
9180 9180 if (a >= addr + len) {
9181 9181 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9182 9182 if (pamp != NULL) {
9183 9183 ASSERT(svd->type == MAP_SHARED);
9184 9184 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9185 9185 npages);
9186 9186 wlen = len;
9187 9187 }
9188 9188 if (sftlck_sbase) {
9189 9189 atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
9190 9190 }
9191 9191 if (sftlck_send) {
9192 9192 atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
9193 9193 }
9194 9194 if (use_pcache) {
9195 9195 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9196 9196 rw, pflags, preclaim_callback);
9197 9197 }
9198 9198 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9199 9199 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9200 9200 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9201 9201 return (0);
9202 9202 }
9203 9203
9204 9204 pplist = pl;
9205 9205 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9206 9206 while (np > (uint_t)0) {
9207 9207 ASSERT(PAGE_LOCKED(*pplist));
9208 9208 page_unlock(*pplist);
9209 9209 np--;
9210 9210 pplist++;
9211 9211 }
9212 9212 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9213 9213 out:
9214 9214 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9215 9215 *ppp = NULL;
9216 9216 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9217 9217 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9218 9218 return (error);
9219 9219 }
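For context, this pagelock fast path is driven from as_pagelock() and
as_pageunlock(); the sketch below shows the expected pairing from a caller's
point of view. It is a simplified illustration only (real consumers such as
physio also handle the case where no shadow list is returned and fall back
to faulting the pages in):

	struct page **pplist;
	int error;

	error = as_pagelock(as, &pplist, uaddr, ulen, S_WRITE);
	if (error == 0) {
		/* pages are held; perform the I/O against them */

		/* must pass the same addr/len so the matching entry is found */
		as_pageunlock(as, pplist, uaddr, ulen, S_WRITE);
	}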
9220 9220
9221 9221 /*
9222 9222 * purge any cached pages in the I/O page cache
9223 9223 */
9224 9224 static void
9225 9225 segvn_purge(struct seg *seg)
9226 9226 {
9227 9227 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9228 9228
9229 9229 /*
9230 9230 * pcache is only used by pure anon segments.
9231 9231 */
9232 9232 if (svd->amp == NULL || svd->vp != NULL) {
9233 9233 return;
9234 9234 }
9235 9235
9236 9236 /*
9237 9237 * For MAP_SHARED segments non 0 segment's softlockcnt means
9238 9238 * active IO is still in progress via this segment. So we only
9239 9239 * purge MAP_SHARED segments when their softlockcnt is 0.
9240 9240 */
9241 9241 if (svd->type == MAP_PRIVATE) {
9242 9242 if (svd->softlockcnt) {
9243 9243 seg_ppurge(seg, NULL, 0);
9244 9244 }
9245 9245 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9246 9246 seg_ppurge(seg, svd->amp, 0);
9247 9247 }
9248 9248 }
9249 9249
9250 9250 /*
9251 9251 * If async argument is not 0 we are called from pcache async thread and don't
9252 9252 * hold AS lock.
9253 9253 */
9254 9254
9255 9255 /*ARGSUSED*/
9256 9256 static int
9257 9257 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9258 9258 enum seg_rw rw, int async)
9259 9259 {
9260 9260 struct seg *seg = (struct seg *)ptag;
9261 9261 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9262 9262 pgcnt_t np, npages;
9263 9263 struct page **pl;
9264 9264
9265 9265 npages = np = btop(len);
9266 9266 ASSERT(npages);
9267 9267
9268 9268 ASSERT(svd->vp == NULL && svd->amp != NULL);
9269 9269 ASSERT(svd->softlockcnt >= npages);
9270 9270 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9271 9271
9272 9272 pl = pplist;
9273 9273
9274 9274 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9275 9275 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9276 9276
9277 9277 while (np > (uint_t)0) {
9278 9278 if (rw == S_WRITE) {
9279 9279 hat_setrefmod(*pplist);
9280 9280 } else {
9281 9281 hat_setref(*pplist);
9282 9282 }
9283 9283 page_unlock(*pplist);
9284 9284 np--;
9285 9285 pplist++;
9286 9286 }
9287 9287
9288 9288 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9289 9289
9290 9290 /*
9291 9291 * If we are pcache async thread we don't hold AS lock. This means if
9292 9292 * softlockcnt drops to 0 after the decrement below address space may
9293 9293 	 * get freed. We can't allow it since after softlock decrement to 0 we
9294 9294 * still need to access as structure for possible wakeup of unmap
9295 9295 * waiters. To prevent the disappearance of as we take this segment
9296 9296 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
9297 9297 * make sure this routine completes before segment is freed.
9298 9298 *
9299 9299 * The second complication we have to deal with in async case is a
9300 9300 * possibility of missed wake up of unmap wait thread. When we don't
9301 9301 * hold as lock here we may take a_contents lock before unmap wait
9302 9302 * thread that was first to see softlockcnt was still not 0. As a
9303 9303 * result we'll fail to wake up an unmap wait thread. To avoid this
9304 9304 * race we set nounmapwait flag in as structure if we drop softlockcnt
9305 9305 * to 0 when we were called by pcache async thread. unmapwait thread
9306 9306 * will not block if this flag is set.
9307 9307 */
9308 9308 if (async) {
9309 9309 mutex_enter(&svd->segfree_syncmtx);
9310 9310 }
9311 9311
9312 9312 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9313 9313 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9314 9314 mutex_enter(&seg->s_as->a_contents);
9315 9315 if (async) {
9316 9316 AS_SETNOUNMAPWAIT(seg->s_as);
9317 9317 }
9318 9318 if (AS_ISUNMAPWAIT(seg->s_as)) {
9319 9319 AS_CLRUNMAPWAIT(seg->s_as);
9320 9320 cv_broadcast(&seg->s_as->a_cv);
9321 9321 }
9322 9322 mutex_exit(&seg->s_as->a_contents);
9323 9323 }
9324 9324 }
9325 9325
9326 9326 if (async) {
9327 9327 mutex_exit(&svd->segfree_syncmtx);
9328 9328 }
9329 9329 return (0);
9330 9330 }
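The decrement-and-test step above uses the non-deprecated "new value" atomic
from <sys/atomic.h>; in isolation the idiom looks like the fragment below
(the counter name is illustrative, not taken from this file):

	volatile ulong_t softlockcnt;

	if (atomic_add_long_nv(&softlockcnt, -(long)npages) == 0) {
		/*
		 * We dropped the last outstanding softlock; only now is it
		 * safe to wake anyone waiting to unmap or free the segment.
		 */
	}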
9331 9331
9332 9332 /*ARGSUSED*/
9333 9333 static int
9334 9334 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9335 9335 enum seg_rw rw, int async)
9336 9336 {
9337 9337 amp_t *amp = (amp_t *)ptag;
9338 9338 pgcnt_t np, npages;
9339 9339 struct page **pl;
9340 9340
9341 9341 npages = np = btop(len);
9342 9342 ASSERT(npages);
9343 9343 ASSERT(amp->a_softlockcnt >= npages);
9344 9344
9345 9345 pl = pplist;
9346 9346
9347 9347 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9348 9348 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9349 9349
9350 9350 while (np > (uint_t)0) {
9351 9351 if (rw == S_WRITE) {
9352 9352 hat_setrefmod(*pplist);
9353 9353 } else {
9354 9354 hat_setref(*pplist);
9355 9355 }
9356 9356 page_unlock(*pplist);
9357 9357 np--;
9358 9358 pplist++;
9359 9359 }
9360 9360
9361 9361 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9362 9362
9363 9363 /*
9364 9364 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9365 9365 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9366 9366 * and anonmap_purge() acquires a_purgemtx.
9367 9367 */
9368 9368 	mutex_enter(&amp->a_purgemtx);
9369 9369 	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9370 9370 amp->a_purgewait) {
9371 9371 amp->a_purgewait = 0;
9372 9372 		cv_broadcast(&amp->a_purgecv);
9373 9373 }
9374 9374 	mutex_exit(&amp->a_purgemtx);
9375 9375 return (0);
9376 9376 }
9377 9377
9378 9378 /*
9379 9379 * get a memory ID for an addr in a given segment
9380 9380 *
9381 9381 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9382 9382 * At fault time they will be relocated into larger pages.
9383 9383 */
9384 9384 static int
9385 9385 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9386 9386 {
9387 9387 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9388 9388 struct anon *ap = NULL;
9389 9389 ulong_t anon_index;
9390 9390 struct anon_map *amp;
9391 9391 anon_sync_obj_t cookie;
9392 9392
9393 9393 if (svd->type == MAP_PRIVATE) {
9394 9394 memidp->val[0] = (uintptr_t)seg->s_as;
9395 9395 memidp->val[1] = (uintptr_t)addr;
9396 9396 return (0);
9397 9397 }
9398 9398
9399 9399 if (svd->type == MAP_SHARED) {
9400 9400 if (svd->vp) {
9401 9401 memidp->val[0] = (uintptr_t)svd->vp;
9402 9402 memidp->val[1] = (u_longlong_t)svd->offset +
9403 9403 (uintptr_t)(addr - seg->s_base);
9404 9404 return (0);
9405 9405 } else {
9406 9406
9407 9407 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9408 9408 if ((amp = svd->amp) != NULL) {
9409 9409 anon_index = svd->anon_index +
9410 9410 seg_page(seg, addr);
9411 9411 }
9412 9412 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9413 9413
9414 9414 ASSERT(amp != NULL);
9415 9415
9416 9416 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9417 9417 anon_array_enter(amp, anon_index, &cookie);
9418 9418 ap = anon_get_ptr(amp->ahp, anon_index);
9419 9419 if (ap == NULL) {
9420 9420 page_t *pp;
9421 9421
9422 9422 pp = anon_zero(seg, addr, &ap, svd->cred);
9423 9423 if (pp == NULL) {
9424 9424 anon_array_exit(&cookie);
9425 9425 				ANON_LOCK_EXIT(&amp->a_rwlock);
9426 9426 return (ENOMEM);
9427 9427 }
9428 9428 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9429 9429 == NULL);
9430 9430 (void) anon_set_ptr(amp->ahp, anon_index,
9431 9431 ap, ANON_SLEEP);
9432 9432 page_unlock(pp);
9433 9433 }
9434 9434
9435 9435 anon_array_exit(&cookie);
9436 9436 			ANON_LOCK_EXIT(&amp->a_rwlock);
9437 9437
9438 9438 memidp->val[0] = (uintptr_t)ap;
9439 9439 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9440 9440 return (0);
9441 9441 }
9442 9442 }
9443 9443 return (EINVAL);
9444 9444 }
9445 9445
9446 9446 static int
9447 9447 sameprot(struct seg *seg, caddr_t a, size_t len)
9448 9448 {
9449 9449 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9450 9450 struct vpage *vpage;
9451 9451 spgcnt_t pages = btop(len);
9452 9452 uint_t prot;
9453 9453
9454 9454 if (svd->pageprot == 0)
9455 9455 return (1);
9456 9456
9457 9457 ASSERT(svd->vpage != NULL);
9458 9458
9459 9459 vpage = &svd->vpage[seg_page(seg, a)];
9460 9460 prot = VPP_PROT(vpage);
9461 9461 vpage++;
9462 9462 pages--;
9463 9463 while (pages-- > 0) {
9464 9464 if (prot != VPP_PROT(vpage))
9465 9465 return (0);
9466 9466 vpage++;
9467 9467 }
9468 9468 return (1);
9469 9469 }
9470 9470
9471 9471 /*
9472 9472 * Get memory allocation policy info for specified address in given segment
9473 9473 */
9474 9474 static lgrp_mem_policy_info_t *
9475 9475 segvn_getpolicy(struct seg *seg, caddr_t addr)
9476 9476 {
9477 9477 struct anon_map *amp;
9478 9478 ulong_t anon_index;
9479 9479 lgrp_mem_policy_info_t *policy_info;
9480 9480 struct segvn_data *svn_data;
9481 9481 u_offset_t vn_off;
9482 9482 vnode_t *vp;
9483 9483
9484 9484 ASSERT(seg != NULL);
9485 9485
9486 9486 svn_data = (struct segvn_data *)seg->s_data;
9487 9487 if (svn_data == NULL)
9488 9488 return (NULL);
9489 9489
9490 9490 /*
9491 9491 * Get policy info for private or shared memory
9492 9492 */
9493 9493 if (svn_data->type != MAP_SHARED) {
9494 9494 if (svn_data->tr_state != SEGVN_TR_ON) {
9495 9495 policy_info = &svn_data->policy_info;
9496 9496 } else {
9497 9497 policy_info = &svn_data->tr_policy_info;
9498 9498 ASSERT(policy_info->mem_policy ==
9499 9499 LGRP_MEM_POLICY_NEXT_SEG);
9500 9500 }
9501 9501 } else {
9502 9502 amp = svn_data->amp;
9503 9503 anon_index = svn_data->anon_index + seg_page(seg, addr);
9504 9504 vp = svn_data->vp;
9505 9505 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9506 9506 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9507 9507 }
9508 9508
9509 9509 return (policy_info);
9510 9510 }
9511 9511
9512 9512 /*ARGSUSED*/
9513 9513 static int
9514 9514 segvn_capable(struct seg *seg, segcapability_t capability)
9515 9515 {
9516 9516 return (0);
9517 9517 }
9518 9518
9519 9519 /*
9520 9520 * Bind text vnode segment to an amp. If we bind successfully mappings will be
9521 9521 * established to per vnode mapping per lgroup amp pages instead of to vnode
9522 9522 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9523 9523 * may share the same text replication amp. If a suitable amp doesn't already
9524 9524 * exist in svntr hash table create a new one. We may fail to bind to amp if
9525 9525 * segment is not eligible for text replication. Code below first checks for
9526 9526 * these conditions. If binding is successful segment tr_state is set to on
9527 9527 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9528 9528 * svd->amp remains as NULL.
9529 9529 */
9530 9530 static void
9531 9531 segvn_textrepl(struct seg *seg)
9532 9532 {
9533 9533 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9534 9534 vnode_t *vp = svd->vp;
9535 9535 u_offset_t off = svd->offset;
9536 9536 size_t size = seg->s_size;
9537 9537 u_offset_t eoff = off + size;
9538 9538 uint_t szc = seg->s_szc;
9539 9539 ulong_t hash = SVNTR_HASH_FUNC(vp);
9540 9540 svntr_t *svntrp;
9541 9541 struct vattr va;
9542 9542 proc_t *p = seg->s_as->a_proc;
9543 9543 lgrp_id_t lgrp_id;
9544 9544 lgrp_id_t olid;
9545 9545 int first;
9546 9546 struct anon_map *amp;
9547 9547
9548 9548 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9549 9549 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9550 9550 ASSERT(p != NULL);
9551 9551 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9552 9552 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9553 9553 ASSERT(svd->flags & MAP_TEXT);
9554 9554 ASSERT(svd->type == MAP_PRIVATE);
9555 9555 ASSERT(vp != NULL && svd->amp == NULL);
9556 9556 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9557 9557 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9558 9558 ASSERT(seg->s_as != &kas);
9559 9559 ASSERT(off < eoff);
9560 9560 ASSERT(svntr_hashtab != NULL);
9561 9561
9562 9562 /*
9563 9563 * If numa optimizations are no longer desired bail out.
9564 9564 */
9565 9565 if (!lgrp_optimizations()) {
9566 9566 svd->tr_state = SEGVN_TR_OFF;
9567 9567 return;
9568 9568 }
9569 9569
9570 9570 /*
9571 9571 * Avoid creating anon maps with size bigger than the file size.
9572 9572 * If VOP_GETATTR() call fails bail out.
9573 9573 */
9574 9574 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9575 9575 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9576 9576 svd->tr_state = SEGVN_TR_OFF;
9577 9577 SEGVN_TR_ADDSTAT(gaerr);
9578 9578 return;
9579 9579 }
9580 9580 if (btopr(va.va_size) < btopr(eoff)) {
9581 9581 svd->tr_state = SEGVN_TR_OFF;
9582 9582 SEGVN_TR_ADDSTAT(overmap);
9583 9583 return;
9584 9584 }
9585 9585
9586 9586 /*
9587 9587 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
9588 9588 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
9589 9589 * mapping that checks if trcache for this vnode needs to be
9590 9590 * invalidated can't miss us.
9591 9591 */
9592 9592 if (!(vp->v_flag & VVMEXEC)) {
9593 9593 mutex_enter(&vp->v_lock);
9594 9594 vp->v_flag |= VVMEXEC;
9595 9595 mutex_exit(&vp->v_lock);
9596 9596 }
9597 9597 mutex_enter(&svntr_hashtab[hash].tr_lock);
9598 9598 /*
9599 9599 * Bail out if potentially MAP_SHARED writable mappings exist to this
9600 9600 * vnode. We don't want to use old file contents from existing
9601 9601 * replicas if this mapping was established after the original file
9602 9602 * was changed.
9603 9603 */
9604 9604 if (vn_is_mapped(vp, V_WRITE)) {
9605 9605 mutex_exit(&svntr_hashtab[hash].tr_lock);
9606 9606 svd->tr_state = SEGVN_TR_OFF;
9607 9607 SEGVN_TR_ADDSTAT(wrcnt);
9608 9608 return;
9609 9609 }
9610 9610 svntrp = svntr_hashtab[hash].tr_head;
9611 9611 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9612 9612 ASSERT(svntrp->tr_refcnt != 0);
9613 9613 if (svntrp->tr_vp != vp) {
9614 9614 continue;
9615 9615 }
9616 9616
9617 9617 /*
9618 9618 * Bail out if the file or its attributes were changed after
9619 9619 * this replication entry was created since we need to use the
9620 9620 * latest file contents. Note that mtime test alone is not
9621 9621 * sufficient because a user can explicitly change mtime via
9622 9622 	 * utimes(2) interfaces back to the old value after modifying
9623 9623 * the file contents. To detect this case we also have to test
9624 9624 * ctime which among other things records the time of the last
9625 9625 * mtime change by utimes(2). ctime is not changed when the file
9626 9626 * is only read or executed so we expect that typically existing
9627 9627 	 * replication amps can be used most of the time.
9628 9628 */
9629 9629 if (!svntrp->tr_valid ||
9630 9630 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9631 9631 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9632 9632 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9633 9633 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9634 9634 mutex_exit(&svntr_hashtab[hash].tr_lock);
9635 9635 svd->tr_state = SEGVN_TR_OFF;
9636 9636 SEGVN_TR_ADDSTAT(stale);
9637 9637 return;
9638 9638 }
9639 9639 /*
9640 9640 * if off, eoff and szc match current segment we found the
9641 9641 * existing entry we can use.
9642 9642 */
9643 9643 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9644 9644 svntrp->tr_szc == szc) {
9645 9645 break;
9646 9646 }
9647 9647 /*
9648 9648 * Don't create different but overlapping in file offsets
9649 9649 * entries to avoid replication of the same file pages more
9650 9650 * than once per lgroup.
9651 9651 */
9652 9652 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9653 9653 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9654 9654 mutex_exit(&svntr_hashtab[hash].tr_lock);
9655 9655 svd->tr_state = SEGVN_TR_OFF;
9656 9656 SEGVN_TR_ADDSTAT(overlap);
9657 9657 return;
9658 9658 }
9659 9659 }
9660 9660 /*
9661 9661 * If we didn't find existing entry create a new one.
9662 9662 */
9663 9663 if (svntrp == NULL) {
9664 9664 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9665 9665 if (svntrp == NULL) {
9666 9666 mutex_exit(&svntr_hashtab[hash].tr_lock);
9667 9667 svd->tr_state = SEGVN_TR_OFF;
9668 9668 SEGVN_TR_ADDSTAT(nokmem);
9669 9669 return;
9670 9670 }
9671 9671 #ifdef DEBUG
9672 9672 {
9673 9673 lgrp_id_t i;
9674 9674 for (i = 0; i < NLGRPS_MAX; i++) {
9675 9675 ASSERT(svntrp->tr_amp[i] == NULL);
9676 9676 }
9677 9677 }
9678 9678 #endif /* DEBUG */
9679 9679 svntrp->tr_vp = vp;
9680 9680 svntrp->tr_off = off;
9681 9681 svntrp->tr_eoff = eoff;
9682 9682 svntrp->tr_szc = szc;
9683 9683 svntrp->tr_valid = 1;
9684 9684 svntrp->tr_mtime = va.va_mtime;
9685 9685 svntrp->tr_ctime = va.va_ctime;
9686 9686 svntrp->tr_refcnt = 0;
9687 9687 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9688 9688 svntr_hashtab[hash].tr_head = svntrp;
9689 9689 }
9690 9690 first = 1;
9691 9691 again:
9692 9692 /*
9693 9693 * We want to pick a replica with pages on main thread's (t_tid = 1,
9694 9694 * aka T1) lgrp. Currently text replication is only optimized for
9695 9695 * workloads that either have all threads of a process on the same
9696 9696 * lgrp or execute their large text primarily on main thread.
9697 9697 */
9698 9698 lgrp_id = p->p_t1_lgrpid;
9699 9699 if (lgrp_id == LGRP_NONE) {
9700 9700 /*
9701 9701 * In case exec() prefaults text on non main thread use
9702 9702 * current thread lgrpid. It will become main thread anyway
9703 9703 * soon.
9704 9704 */
9705 9705 lgrp_id = lgrp_home_id(curthread);
9706 9706 }
9707 9707 /*
9708 9708 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
9709 9709 * just set it to NLGRPS_MAX if it's different from current process T1
9710 9710 * home lgrp. p_tr_lgrpid is used to detect if process uses text
9711 9711 * replication and T1 new home is different from lgrp used for text
9712 9712 	 * replication. When this happens asynchronous segvn thread rechecks if
9713 9713 * segments should change lgrps used for text replication. If we fail
9714 - * to set p_tr_lgrpid with cas32 then set it to NLGRPS_MAX without cas
9715 - * if it's not already NLGRPS_MAX and not equal lgrp_id we want to
9716 - * use. We don't need to use cas in this case because another thread
9717 - * that races in between our non atomic check and set may only change
9718 - * p_tr_lgrpid to NLGRPS_MAX at this point.
9714 + * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX
9715 + * without cas if it's not already NLGRPS_MAX and not equal lgrp_id
9716 + * we want to use. We don't need to use cas in this case because
9717 + * another thread that races in between our non atomic check and set
9718 + * may only change p_tr_lgrpid to NLGRPS_MAX at this point.
9719 9719 */
9720 9720 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9721 9721 olid = p->p_tr_lgrpid;
9722 9722 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9723 9723 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9724 - if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) {
9724 + if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
9725 + olid) {
9725 9726 olid = p->p_tr_lgrpid;
9726 9727 ASSERT(olid != LGRP_NONE);
9727 9728 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9728 9729 p->p_tr_lgrpid = NLGRPS_MAX;
9729 9730 }
9730 9731 }
9731 9732 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9732 9733 membar_producer();
9733 9734 /*
9734 9735 * lgrp_move_thread() won't schedule async recheck after
9735 9736 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
9736 9737 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
9737 9738 * is not LGRP_NONE.
9738 9739 */
9739 9740 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9740 9741 p->p_t1_lgrpid != lgrp_id) {
9741 9742 first = 0;
9742 9743 goto again;
9743 9744 }
9744 9745 }
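The replacement above is the pattern this change applies: legacy cas32()
becomes atomic_cas_32() from <sys/atomic.h>, with identical compare-and-swap
semantics (the old value of the target is returned, so a lost race is
detected by comparing against the expected value). A standalone before/after
illustration, using variable names that are not from this file:

	volatile uint32_t state;
	uint32_t expected = 0, newval = 1;

	/* old, deprecated interface */
	if (cas32((uint32_t *)&state, expected, newval) != expected) {
		/* lost the race; some other thread updated state first */
	}

	/* new interface, same behavior */
	if (atomic_cas_32(&state, expected, newval) != expected) {
		/* lost the race; some other thread updated state first */
	}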
9745 9746 /*
9746 9747 * If no amp was created yet for lgrp_id create a new one as long as
9747 9748 * we have enough memory to afford it.
9748 9749 */
9749 9750 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9750 9751 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9751 9752 if (trmem > segvn_textrepl_max_bytes) {
9752 9753 SEGVN_TR_ADDSTAT(normem);
9753 9754 goto fail;
9754 9755 }
9755 9756 if (anon_try_resv_zone(size, NULL) == 0) {
9756 9757 SEGVN_TR_ADDSTAT(noanon);
9757 9758 goto fail;
9758 9759 }
9759 9760 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9760 9761 if (amp == NULL) {
9761 9762 anon_unresv_zone(size, NULL);
9762 9763 SEGVN_TR_ADDSTAT(nokmem);
9763 9764 goto fail;
9764 9765 }
9765 9766 ASSERT(amp->refcnt == 1);
9766 9767 amp->a_szc = szc;
9767 9768 svntrp->tr_amp[lgrp_id] = amp;
9768 9769 SEGVN_TR_ADDSTAT(newamp);
9769 9770 }
9770 9771 svntrp->tr_refcnt++;
9771 9772 ASSERT(svd->svn_trnext == NULL);
9772 9773 ASSERT(svd->svn_trprev == NULL);
9773 9774 svd->svn_trnext = svntrp->tr_svnhead;
9774 9775 svd->svn_trprev = NULL;
9775 9776 if (svntrp->tr_svnhead != NULL) {
9776 9777 svntrp->tr_svnhead->svn_trprev = svd;
9777 9778 }
9778 9779 svntrp->tr_svnhead = svd;
9779 9780 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9780 9781 ASSERT(amp->refcnt >= 1);
9781 9782 svd->amp = amp;
9782 9783 svd->anon_index = 0;
9783 9784 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9784 9785 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9785 9786 svd->tr_state = SEGVN_TR_ON;
9786 9787 mutex_exit(&svntr_hashtab[hash].tr_lock);
9787 9788 SEGVN_TR_ADDSTAT(repl);
9788 9789 return;
9789 9790 fail:
9790 9791 ASSERT(segvn_textrepl_bytes >= size);
9791 9792 atomic_add_long(&segvn_textrepl_bytes, -size);
9792 9793 ASSERT(svntrp != NULL);
9793 9794 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9794 9795 if (svntrp->tr_refcnt == 0) {
9795 9796 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9796 9797 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9797 9798 mutex_exit(&svntr_hashtab[hash].tr_lock);
9798 9799 kmem_cache_free(svntr_cache, svntrp);
9799 9800 } else {
9800 9801 mutex_exit(&svntr_hashtab[hash].tr_lock);
9801 9802 }
9802 9803 svd->tr_state = SEGVN_TR_OFF;
9803 9804 }
9804 9805
9805 9806 /*
9806 9807 * Convert seg back to regular vnode mapping seg by unbinding it from its text
9807 9808 * replication amp. This routine is most typically called when segment is
9808 9809 * unmapped but can also be called when segment no longer qualifies for text
9809 9810 * replication (e.g. due to protection changes). If unload_unmap is set use
9810 9811 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of
9811 9812 * svntr free all its anon maps and remove it from the hash table.
9812 9813 */
9813 9814 static void
9814 9815 segvn_textunrepl(struct seg *seg, int unload_unmap)
9815 9816 {
9816 9817 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9817 9818 vnode_t *vp = svd->vp;
9818 9819 u_offset_t off = svd->offset;
9819 9820 size_t size = seg->s_size;
9820 9821 u_offset_t eoff = off + size;
9821 9822 uint_t szc = seg->s_szc;
9822 9823 ulong_t hash = SVNTR_HASH_FUNC(vp);
9823 9824 svntr_t *svntrp;
9824 9825 svntr_t **prv_svntrp;
9825 9826 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
9826 9827 lgrp_id_t i;
9827 9828
9828 9829 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9829 9830 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
9830 9831 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9831 9832 ASSERT(svd->tr_state == SEGVN_TR_ON);
9832 9833 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9833 9834 ASSERT(svd->amp != NULL);
9834 9835 ASSERT(svd->amp->refcnt >= 1);
9835 9836 ASSERT(svd->anon_index == 0);
9836 9837 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9837 9838 ASSERT(svntr_hashtab != NULL);
9838 9839
9839 9840 mutex_enter(&svntr_hashtab[hash].tr_lock);
9840 9841 prv_svntrp = &svntr_hashtab[hash].tr_head;
9841 9842 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
9842 9843 ASSERT(svntrp->tr_refcnt != 0);
9843 9844 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
9844 9845 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
9845 9846 break;
9846 9847 }
9847 9848 }
9848 9849 if (svntrp == NULL) {
9849 9850 panic("segvn_textunrepl: svntr record not found");
9850 9851 }
9851 9852 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
9852 9853 panic("segvn_textunrepl: amp mismatch");
9853 9854 }
9854 9855 svd->tr_state = SEGVN_TR_OFF;
9855 9856 svd->amp = NULL;
9856 9857 if (svd->svn_trprev == NULL) {
9857 9858 ASSERT(svntrp->tr_svnhead == svd);
9858 9859 svntrp->tr_svnhead = svd->svn_trnext;
9859 9860 if (svntrp->tr_svnhead != NULL) {
9860 9861 svntrp->tr_svnhead->svn_trprev = NULL;
9861 9862 }
9862 9863 svd->svn_trnext = NULL;
9863 9864 } else {
9864 9865 svd->svn_trprev->svn_trnext = svd->svn_trnext;
9865 9866 if (svd->svn_trnext != NULL) {
9866 9867 svd->svn_trnext->svn_trprev = svd->svn_trprev;
9867 9868 svd->svn_trnext = NULL;
9868 9869 }
9869 9870 svd->svn_trprev = NULL;
9870 9871 }
9871 9872 if (--svntrp->tr_refcnt) {
9872 9873 mutex_exit(&svntr_hashtab[hash].tr_lock);
9873 9874 goto done;
9874 9875 }
9875 9876 *prv_svntrp = svntrp->tr_next;
9876 9877 mutex_exit(&svntr_hashtab[hash].tr_lock);
9877 9878 for (i = 0; i < NLGRPS_MAX; i++) {
9878 9879 struct anon_map *amp = svntrp->tr_amp[i];
9879 9880 if (amp == NULL) {
9880 9881 continue;
9881 9882 }
9882 9883 ASSERT(amp->refcnt == 1);
9883 9884 ASSERT(amp->swresv == size);
9884 9885 ASSERT(amp->size == size);
9885 9886 ASSERT(amp->a_szc == szc);
9886 9887 if (amp->a_szc != 0) {
9887 9888 anon_free_pages(amp->ahp, 0, size, szc);
9888 9889 } else {
9889 9890 anon_free(amp->ahp, 0, size);
9890 9891 }
9891 9892 svntrp->tr_amp[i] = NULL;
9892 9893 ASSERT(segvn_textrepl_bytes >= size);
9893 9894 atomic_add_long(&segvn_textrepl_bytes, -size);
9894 9895 anon_unresv_zone(amp->swresv, NULL);
9895 9896 amp->refcnt = 0;
9896 9897 anonmap_free(amp);
9897 9898 }
9898 9899 kmem_cache_free(svntr_cache, svntrp);
9899 9900 done:
9900 9901 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
9901 9902 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
9902 9903 }
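
The segvn_textrepl_bytes accounting in the two routines above follows a reserve-then-roll-back pattern: the would-be replica size is charged with atomic_add_long_nv(), and if the new total exceeds the limit, or a later reservation or allocation fails, the same amount is subtracted again by passing a negative delta to atomic_add_long(). A hedged sketch of that pattern in isolation; tr_bytes_reserve(), tr_bytes and tr_max are illustrative names, not identifiers from this file.

    #include <sys/types.h>
    #include <sys/atomic.h>

    static volatile ulong_t tr_bytes;		/* bytes currently charged */
    static ulong_t tr_max = 64 * 1024 * 1024;	/* arbitrary example cap */

    /*
     * Optimistically charge 'size' bytes against the global budget:
     * bump the counter, check the new total returned by
     * atomic_add_long_nv(), and undo the charge with a negated delta
     * if the cap was exceeded.
     */
    static int
    tr_bytes_reserve(size_t size)
    {
    	if (atomic_add_long_nv(&tr_bytes, size) > tr_max) {
    		atomic_add_long(&tr_bytes, -(long)size);
    		return (0);		/* over budget, charge rolled back */
    	}
    	return (1);			/* reservation held */
    }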
9903 9904
9904 9905 /*
9905 9906 * This is called when a MAP_SHARED writable mapping is created to a vnode
9906 9907 * that is currently used for execution (VVMEXEC flag is set). In this case we
9907 9908 * need to prevent further use of existing replicas.
9908 9909 */
9909 9910 static void
9910 9911 segvn_inval_trcache(vnode_t *vp)
9911 9912 {
9912 9913 ulong_t hash = SVNTR_HASH_FUNC(vp);
9913 9914 svntr_t *svntrp;
9914 9915
9915 9916 ASSERT(vp->v_flag & VVMEXEC);
9916 9917
9917 9918 if (svntr_hashtab == NULL) {
9918 9919 return;
9919 9920 }
9920 9921
9921 9922 mutex_enter(&svntr_hashtab[hash].tr_lock);
9922 9923 svntrp = svntr_hashtab[hash].tr_head;
9923 9924 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9924 9925 ASSERT(svntrp->tr_refcnt != 0);
9925 9926 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
9926 9927 svntrp->tr_valid = 0;
9927 9928 }
9928 9929 }
9929 9930 mutex_exit(&svntr_hashtab[hash].tr_lock);
9930 9931 }
9931 9932
9932 9933 static void
9933 9934 segvn_trasync_thread(void)
9934 9935 {
9935 9936 callb_cpr_t cpr_info;
9936 9937 kmutex_t cpr_lock; /* just for CPR stuff */
9937 9938
9938 9939 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
9939 9940
9940 9941 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
9941 9942 callb_generic_cpr, "segvn_async");
9942 9943
9943 9944 if (segvn_update_textrepl_interval == 0) {
9944 9945 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
9945 9946 } else {
9946 9947 segvn_update_textrepl_interval *= hz;
9947 9948 }
9948 9949 (void) timeout(segvn_trupdate_wakeup, NULL,
9949 9950 segvn_update_textrepl_interval);
9950 9951
9951 9952 for (;;) {
9952 9953 mutex_enter(&cpr_lock);
9953 9954 CALLB_CPR_SAFE_BEGIN(&cpr_info);
9954 9955 mutex_exit(&cpr_lock);
9955 9956 sema_p(&segvn_trasync_sem);
9956 9957 mutex_enter(&cpr_lock);
9957 9958 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
9958 9959 mutex_exit(&cpr_lock);
9959 9960 segvn_trupdate();
9960 9961 }
9961 9962 }
9962 9963
9963 9964 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
9964 9965
9965 9966 static void
9966 9967 segvn_trupdate_wakeup(void *dummy)
9967 9968 {
9968 9969 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
9969 9970
9970 9971 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
9971 9972 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
9972 9973 sema_v(&segvn_trasync_sem);
9973 9974 }
9974 9975
9975 9976 if (!segvn_disable_textrepl_update &&
9976 9977 segvn_update_textrepl_interval != 0) {
9977 9978 (void) timeout(segvn_trupdate_wakeup, dummy,
9978 9979 segvn_update_textrepl_interval);
9979 9980 }
9980 9981 }
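
segvn_trupdate_wakeup() above is a self-rearming timeout(9F) callback: it only pokes the worker thread (via sema_v()) when the lgroup migration count has moved since the last snapshot, and then schedules itself again unless updates have been disabled. A minimal sketch of that rearm-from-the-callback shape under assumed names; my_wakeup(), my_sema, my_interval and my_disabled are stand-ins, not symbols from this file.

    #include <sys/types.h>
    #include <sys/ksynch.h>
    #include <sys/conf.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static ksema_t my_sema;		/* worker wakeup; sema_init() elsewhere */
    static clock_t my_interval;		/* rearm interval in ticks, set at init */
    static volatile int my_disabled;	/* set nonzero to stop rearming */

    /*
     * timeout(9F) callback: wake the worker and, unless the mechanism
     * has been disabled, schedule the next invocation of ourselves.
     * Rearming from inside the callback keeps exactly one timeout
     * outstanding at any time.
     */
    static void
    my_wakeup(void *arg)
    {
    	sema_v(&my_sema);

    	if (!my_disabled && my_interval != 0) {
    		(void) timeout(my_wakeup, arg, my_interval);
    	}
    }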
9981 9982
9982 9983 static void
9983 9984 segvn_trupdate(void)
9984 9985 {
9985 9986 ulong_t hash;
9986 9987 svntr_t *svntrp;
9987 9988 segvn_data_t *svd;
9988 9989
9989 9990 ASSERT(svntr_hashtab != NULL);
9990 9991
9991 9992 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
9992 9993 mutex_enter(&svntr_hashtab[hash].tr_lock);
9993 9994 svntrp = svntr_hashtab[hash].tr_head;
9994 9995 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9995 9996 ASSERT(svntrp->tr_refcnt != 0);
9996 9997 svd = svntrp->tr_svnhead;
9997 9998 for (; svd != NULL; svd = svd->svn_trnext) {
9998 9999 segvn_trupdate_seg(svd->seg, svd, svntrp,
9999 10000 hash);
10000 10001 }
10001 10002 }
10002 10003 mutex_exit(&svntr_hashtab[hash].tr_lock);
10003 10004 }
10004 10005 }
10005 10006
10006 10007 static void
10007 10008 segvn_trupdate_seg(struct seg *seg,
10008 10009 segvn_data_t *svd,
10009 10010 svntr_t *svntrp,
10010 10011 ulong_t hash)
10011 10012 {
10012 10013 proc_t *p;
10013 10014 lgrp_id_t lgrp_id;
10014 10015 struct as *as;
10015 10016 size_t size;
10016 10017 struct anon_map *amp;
10017 10018
10018 10019 ASSERT(svd->vp != NULL);
10019 10020 ASSERT(svd->vp == svntrp->tr_vp);
10020 10021 ASSERT(svd->offset == svntrp->tr_off);
10021 10022 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
10022 10023 ASSERT(seg != NULL);
10023 10024 ASSERT(svd->seg == seg);
10024 10025 ASSERT(seg->s_data == (void *)svd);
10025 10026 ASSERT(seg->s_szc == svntrp->tr_szc);
10026 10027 ASSERT(svd->tr_state == SEGVN_TR_ON);
10027 10028 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10028 10029 ASSERT(svd->amp != NULL);
10029 10030 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10030 10031 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10031 10032 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10032 10033 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10033 10034 ASSERT(svntrp->tr_refcnt != 0);
10034 10035 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10035 10036
10036 10037 as = seg->s_as;
10037 10038 ASSERT(as != NULL && as != &kas);
10038 10039 p = as->a_proc;
10039 10040 ASSERT(p != NULL);
10040 10041 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
10041 10042 lgrp_id = p->p_t1_lgrpid;
10042 10043 if (lgrp_id == LGRP_NONE) {
10043 10044 return;
10044 10045 }
10045 10046 ASSERT(lgrp_id < NLGRPS_MAX);
10046 10047 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10047 10048 return;
10048 10049 }
10049 10050
10050 10051 /*
10051 10052 	 * Use tryenter locking since we are locking as/seg and svntr hash
10052 10053 	 * lock in reverse from synchronous thread order.
10053 10054 */
10054 10055 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10055 10056 SEGVN_TR_ADDSTAT(nolock);
10056 10057 if (segvn_lgrp_trthr_migrs_snpsht) {
10057 10058 segvn_lgrp_trthr_migrs_snpsht = 0;
10058 10059 }
10059 10060 return;
10060 10061 }
10061 10062 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10062 10063 AS_LOCK_EXIT(as, &as->a_lock);
10063 10064 SEGVN_TR_ADDSTAT(nolock);
10064 10065 if (segvn_lgrp_trthr_migrs_snpsht) {
10065 10066 segvn_lgrp_trthr_migrs_snpsht = 0;
10066 10067 }
10067 10068 return;
10068 10069 }
10069 10070 size = seg->s_size;
10070 10071 if (svntrp->tr_amp[lgrp_id] == NULL) {
10071 10072 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10072 10073 if (trmem > segvn_textrepl_max_bytes) {
10073 10074 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10074 10075 AS_LOCK_EXIT(as, &as->a_lock);
10075 10076 atomic_add_long(&segvn_textrepl_bytes, -size);
10076 10077 SEGVN_TR_ADDSTAT(normem);
10077 10078 return;
10078 10079 }
10079 10080 if (anon_try_resv_zone(size, NULL) == 0) {
10080 10081 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10081 10082 AS_LOCK_EXIT(as, &as->a_lock);
10082 10083 atomic_add_long(&segvn_textrepl_bytes, -size);
10083 10084 SEGVN_TR_ADDSTAT(noanon);
10084 10085 return;
10085 10086 }
10086 10087 amp = anonmap_alloc(size, size, KM_NOSLEEP);
10087 10088 if (amp == NULL) {
10088 10089 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10089 10090 AS_LOCK_EXIT(as, &as->a_lock);
10090 10091 atomic_add_long(&segvn_textrepl_bytes, -size);
10091 10092 anon_unresv_zone(size, NULL);
10092 10093 SEGVN_TR_ADDSTAT(nokmem);
10093 10094 return;
10094 10095 }
10095 10096 ASSERT(amp->refcnt == 1);
10096 10097 amp->a_szc = seg->s_szc;
10097 10098 svntrp->tr_amp[lgrp_id] = amp;
10098 10099 }
10099 10100 /*
10100 10101 * We don't need to drop the bucket lock but here we give other
10101 10102 * threads a chance. svntr and svd can't be unlinked as long as
10102 10103 * segment lock is held as a writer and AS held as well. After we
10103 10104 	 * retake bucket lock we'll continue from where we left off. We'll be able
10104 10105 * to reach the end of either list since new entries are always added
10105 10106 * to the beginning of the lists.
10106 10107 */
10107 10108 mutex_exit(&svntr_hashtab[hash].tr_lock);
10108 10109 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10109 10110 mutex_enter(&svntr_hashtab[hash].tr_lock);
10110 10111
10111 10112 ASSERT(svd->tr_state == SEGVN_TR_ON);
10112 10113 ASSERT(svd->amp != NULL);
10113 10114 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10114 10115 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10115 10116 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10116 10117
10117 10118 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10118 10119 svd->amp = svntrp->tr_amp[lgrp_id];
10119 10120 p->p_tr_lgrpid = NLGRPS_MAX;
10120 10121 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10121 10122 AS_LOCK_EXIT(as, &as->a_lock);
10122 10123
10123 10124 ASSERT(svntrp->tr_refcnt != 0);
10124 10125 ASSERT(svd->vp == svntrp->tr_vp);
10125 10126 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10126 10127 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10127 10128 ASSERT(svd->seg == seg);
10128 10129 ASSERT(svd->tr_state == SEGVN_TR_ON);
10129 10130
10130 10131 SEGVN_TR_ADDSTAT(asyncrepl);
10131 10132 }
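
The tryenter comment in segvn_trupdate_seg() is the standard answer to taking locks against the established order: the async thread already holds the svntr bucket mutex and wants the address-space and segment locks, which synchronous callers acquire first, so it must only try them and back off on contention rather than block. A hedged illustration of that back-off pattern with a plain kmutex_t and krwlock_t; the lock names and the outer/inner roles here are invented for the sketch.

    #include <sys/ksynch.h>
    #include <sys/debug.h>

    static kmutex_t bucket_lock;	/* normally taken *after* obj_lock */
    static krwlock_t obj_lock;		/* normally taken first */

    /*
     * Called with bucket_lock held, i.e. in the reverse of the usual
     * obj_lock -> bucket_lock order.  Blocking on obj_lock here could
     * deadlock against a thread that holds obj_lock and is waiting for
     * bucket_lock, so only try the lock and give up on contention; the
     * caller simply retries on a later pass.
     */
    static int
    update_locked_object(void)
    {
    	ASSERT(MUTEX_HELD(&bucket_lock));

    	if (!rw_tryenter(&obj_lock, RW_READER)) {
    		return (0);		/* would invert lock order */
    	}

    	/* ... safe to inspect and update the object here ... */

    	rw_exit(&obj_lock);
    	return (1);
    }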
(397 lines elided)