/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable variable size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or locked in memory.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);

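#ifdef SEGKP_EXAMPLE
/*
 * Illustrative sketch, not part of the driver: how a client might use
 * the interface described in the overview comment above to allocate a
 * pageable, zeroed, red-zoned resource (e.g. a kernel stack) from the
 * global segkp segment.  SEGKP_EXAMPLE and example_alloc_stack() are
 * hypothetical names used only for illustration; real callers include
 * the thread and LWP code.
 */
static caddr_t
example_alloc_stack(size_t len)
{
	/* segkp_get() requires a page-aligned length. */
	len = P2ROUNDUP(len, PAGESIZE);

	/* KPD_HASREDZONE leaves one page unmapped to catch overflows. */
	return (segkp_get(segkp, len, KPD_HASREDZONE | KPD_ZERO));
}
#endif	/* SEGKP_EXAMPLE */
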
/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

#define	SEGKP_BADOP(t)	(t(*)())segkp_badop

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;

/*
 * Set to 1 in startup.c, on 32-bit x86 systems only.
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

static struct seg_ops segkp_ops = {
	.dup		= SEGKP_BADOP(int),
	.unmap		= SEGKP_BADOP(int),
	.free		= SEGKP_BADOP(void),
	.fault		= segkp_fault,
	.faulta		= SEGKP_BADOP(faultcode_t),
	.setprot	= SEGKP_BADOP(int),
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.swapout	= SEGKP_BADOP(size_t),
	.sync		= SEGKP_BADOP(int),
	.incore		= SEGKP_BADOP(size_t),
	.lockop		= SEGKP_BADOP(int),
	.getprot	= SEGKP_BADOP(int),
	.getoffset	= SEGKP_BADOP(u_offset_t),
	.gettype	= SEGKP_BADOP(int),
	.getvp		= SEGKP_BADOP(int),
	.advise		= SEGKP_BADOP(int),
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.setpagesize	= SEGKP_BADOP(int),
};


static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

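#ifdef SEGKP_EXAMPLE
/*
 * Illustrative sketch, under the same hypothetical SEGKP_EXAMPLE guard
 * as above: the check that segkp_map_red() (later in this file) performs
 * against the red_minavail tunable.  The redzone is mapped in once fewer
 * than red_minavail bytes of stack remain below the frame pointer.
 */
static int
example_stack_is_low(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();

	return (fp - (uintptr_t)curthread->t_stkbase <
	    (uintptr_t)red_minavail);
}
#endif	/* SEGKP_EXAMPLE */
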
/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}


/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;
		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}

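#ifdef SEGKP_EXAMPLE
/*
 * Illustrative sketch of the cache cookie protocol implemented above
 * (hypothetical caller).  A subsystem creates a freelist once with
 * segkp_cache_init(), then allocates through segkp_cache_get(); because
 * segkp_cache_get() records the cookie in kp_cookie, a later
 * segkp_release() returns the resource to this cache instead of freeing
 * it.  Note that segkp_cache_init() returns (void *)-1, not NULL, on
 * failure.
 */
static void *example_cookie;

static void
example_cache_setup(void)
{
	/* Cache up to 16 one-page resources, locked in memory. */
	example_cookie = segkp_cache_init(segkp, 16, PAGESIZE, KPD_LOCKED);
}

static caddr_t
example_cached_alloc(void)
{
	return (segkp_cache_get(example_cookie));
}
#endif	/* SEGKP_EXAMPLE */
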
/*
 * There are two entry points into segkp_get_internal.  The first includes
 * a cookie used to access a pool of cached segkp resources.  The second
 * does not use the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	int index = (int)(uintptr_t)cookie;
	struct seg *seg;
	size_t len;
	uint_t flags;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		kpd->kp_next = NULL;
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;
	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * This does the real work of segkp allocation.
 * Returns the base address to the client; len must be page-aligned.  A null
 * value is returned if there are no more vm resources (e.g. pages, swap).
 * The len and base recorded in the private data structure include the
 * redzone and the redzone length (if applicable).  If the user requests a
 * redzone, either the first or last page is left unmapped depending on
 * whether stacks grow toward low or high memory.
 *
 * The client may also specify a no-wait flag.  If that is set, the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
513 */ 514 ap = anon_alloc(NULL, 0); 515 if (amp != NULL) 516 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 517 (void) anon_set_ptr(kpd->kp_anon, anon_idx + i, 518 ap, ANON_SLEEP); 519 if (amp != NULL) 520 ANON_LOCK_EXIT(&->a_rwlock); 521 swap_xlate(ap, &vp, &off); 522 523 /* 524 * Create a page with the specified identity. The 525 * page is returned with the "shared" lock held. 526 */ 527 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 528 NULL, pl, PAGESIZE, seg, va, S_CREATE, 529 kcred, NULL); 530 if (err) { 531 /* 532 * XXX - This should not fail. 533 */ 534 panic("segkp_get: no pages"); 535 /*NOTREACHED*/ 536 } 537 pp = pl[0]; 538 } else { 539 ASSERT(page_exists(&kvp, 540 (u_offset_t)(uintptr_t)va) == NULL); 541 542 if ((pp = page_create_va(&kvp, 543 (u_offset_t)(uintptr_t)va, PAGESIZE, 544 (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL | 545 PG_NORELOC, seg, va)) == NULL) { 546 /* 547 * Legitimize resource; then destroy it. 548 * Easier than trying to unwind here. 549 */ 550 kpd->kp_flags = flags; 551 kpd->kp_base = vbase; 552 kpd->kp_len = len; 553 segkp_release_internal(seg, kpd, va - vbase); 554 return (NULL); 555 } 556 page_io_unlock(pp); 557 } 558 559 if (flags & KPD_ZERO) 560 pagezero(pp, 0, PAGESIZE); 561 562 /* 563 * Load and lock an MMU translation for the page. 564 */ 565 hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE), 566 ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD)); 567 568 /* 569 * Now, release lock on the page. 570 */ 571 if (flags & KPD_LOCKED) { 572 /* 573 * Indicate to page_retire framework that this 574 * page can only be retired when it is freed. 575 */ 576 PP_SETRAF(pp); 577 page_downgrade(pp); 578 } else 579 page_unlock(pp); 580 } 581 582 kpd->kp_flags = flags; 583 kpd->kp_base = vbase; 584 kpd->kp_len = len; 585 segkp_insert(seg, kpd); 586 *tkpd = kpd; 587 return (stom(kpd->kp_base, flags)); 588 } 589 590 /* 591 * Release the resource to cache if the pool(designate by the cookie) 592 * has less than the maximum allowable. If inserted in cache, 593 * segkp_delete insures element is taken off of active list. 594 */ 595 void 596 segkp_release(struct seg *seg, caddr_t vaddr) 597 { 598 struct segkp_cache *freelist; 599 struct segkp_data *kpd = NULL; 600 601 if ((kpd = segkp_find(seg, vaddr)) == NULL) { 602 panic("segkp_release: null kpd"); 603 /*NOTREACHED*/ 604 } 605 606 if (kpd->kp_cookie != -1) { 607 freelist = &segkp_cache[kpd->kp_cookie]; 608 mutex_enter(&segkp_lock); 609 if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) { 610 segkp_delete(seg, kpd); 611 kpd->kp_next = freelist->kpf_list; 612 freelist->kpf_list = kpd; 613 freelist->kpf_count++; 614 mutex_exit(&segkp_lock); 615 return; 616 } else { 617 mutex_exit(&segkp_lock); 618 kpd->kp_cookie = -1; 619 } 620 } 621 segkp_release_internal(seg, kpd, kpd->kp_len); 622 } 623 624 /* 625 * Free the entire resource. segkp_unlock gets called with the start of the 626 * mapped portion of the resource. 
/*
 * Free the entire resource.  segkp_unlock gets called with the start of
 * the mapped portion of the resource; the length is the size of the
 * mapped portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);


	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one.  Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);

				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_dec_ulong(&anon_segkp_pages_resv);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp,
				    (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}

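#ifdef SEGKP_EXAMPLE
/*
 * Illustrative sketch of the calling protocol for segkp_map_red() and
 * segkp_unmap_red(), defined below (see the block comment there for the
 * exact rules).  The hypothetical caller must remain non-swappable for
 * the whole interval between a successful segkp_map_red() and the
 * matching segkp_unmap_red().
 */
static void
example_deep_stack_operation(void)
{
	int mapped_red = segkp_map_red();

	/* ... work that may run the kernel stack deep ... */

	if (mapped_red)
		segkp_unmap_red();
}
#endif	/* SEGKP_EXAMPLE */
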
/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when:
 *
 *   - it is safe to sleep on page_create_va().
 *   - the caller is non-swappable.
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.  Note that the caller must _remain_ non-swappable until after
 * calling segkp_unmap_red().
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else	/* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical page for the redzone.
		 */
		/*
		 * No PG_NORELOC here to avoid waits.  Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_inc_32(&red_nmapped);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_inc_32(&red_ndoubles);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now: if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif	/* _LP64 */
}

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up; we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error	Red stacks only supported with downwards stack growth.
#endif

/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat	*hat,
	struct seg	*seg,
	caddr_t		vaddr,
	size_t		len,
	enum fault_type	type,
	enum seg_rw	rw)
{
	struct segkp_data	*kpd = NULL;
	int			err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	/*
	 * Sanity checks.
	 */
	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion.  Note that a reference
		 * to the redzone is handled implicitly, since vaddr would
		 * not equal the base.
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd,
		    KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone.  Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) ==
		    KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being
		 * F_SOFTLOCK'ed.  Return since a second segkp_load is
		 * unnecessary and would also result in the page being
		 * locked twice, eventually hanging the thread_reaper
		 * thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) !=
		    KPD_LOCKED) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}


/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For segkp we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2];	/* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}

/*
 * At the very least, unload the MMU translations and unlock the range
 * if it is locked.  Can be called with the flag value KPD_WRITEDIRTY,
 * which specifies that any dirty pages should be written to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

#ifdef lint
	seg = seg;
#endif /* lint */

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range.  It is assumed
	 * segkp_unlock is called with a page-aligned base.
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			if ((pp = page_find(vp, off)) == NULL) {
				panic("segkp_softunlock: missing page");
				/*NOTREACHED*/
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * We want the most powerful credentials we can get,
			 * so use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}

/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true, in which case we have to (potentially) scan
 * the whole table looking for it.  This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int	i;
	int	stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}

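#ifdef SEGKP_EXAMPLE
/*
 * Illustrative sketch of a lookup through the hash table above:
 * translating an arbitrary address inside a resource back to its
 * segkp_data, which is what segkp_fault() and swapsize() (below) do.
 * example_resource_len() is a hypothetical helper.
 */
static size_t
example_resource_len(caddr_t addr)
{
	struct segkp_data *kpd = segkp_find(segkp, addr);

	return (kpd == NULL ? 0 : kpd->kp_len);
}
#endif	/* SEGKP_EXAMPLE */
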
/*
 * Returns the size of the swappable area.
 */
size_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (0);
}

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_dec_32(&segkp_indel);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}