Print this page
patch fixes
6345 remove xhat support
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/watchpoint.c
+++ new/usr/src/uts/common/os/watchpoint.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 -#pragma ident "%Z%%M% %I% %E% SMI"
28 -
29 27 #include <sys/types.h>
30 28 #include <sys/t_lock.h>
31 29 #include <sys/param.h>
32 30 #include <sys/cred.h>
33 31 #include <sys/debug.h>
34 32 #include <sys/inline.h>
35 33 #include <sys/kmem.h>
36 34 #include <sys/proc.h>
37 35 #include <sys/regset.h>
38 36 #include <sys/sysmacros.h>
39 37 #include <sys/systm.h>
40 38 #include <sys/prsystm.h>
41 39 #include <sys/buf.h>
42 40 #include <sys/signal.h>
43 41 #include <sys/user.h>
44 42 #include <sys/cpuvar.h>
45 43
46 44 #include <sys/fault.h>
47 45 #include <sys/syscall.h>
48 46 #include <sys/procfs.h>
49 47 #include <sys/cmn_err.h>
50 48 #include <sys/stack.h>
51 49 #include <sys/watchpoint.h>
52 50 #include <sys/copyops.h>
53 51 #include <sys/schedctl.h>
54 52
55 53 #include <sys/mman.h>
56 54 #include <vm/as.h>
57 55 #include <vm/seg.h>
58 56
/*
 * Copy ops vector for watchpoints.
 *
 * When a process has active watched areas, its lwps' copy operations are
 * routed through these wrappers instead of the default kernel copy
 * routines.  Each wrapper detects accesses that land on watched pages,
 * temporarily restores the page's original protections around the copy
 * (see pr_mappage()/pr_unmappage() below), and reports watchpoint traps
 * via sys_watchpoint().
 */
static int watch_copyin(const void *, void *, size_t);
static int watch_xcopyin(const void *, void *, size_t);
static int watch_copyout(const void *, void *, size_t);
static int watch_xcopyout(const void *, void *, size_t);
static int watch_copyinstr(const char *, char *, size_t, size_t *);
static int watch_copyoutstr(const char *, char *, size_t, size_t *);
static int watch_fuword8(const void *, uint8_t *);
static int watch_fuword16(const void *, uint16_t *);
static int watch_fuword32(const void *, uint32_t *);
static int watch_suword8(void *, uint8_t);
static int watch_suword16(void *, uint16_t);
static int watch_suword32(void *, uint32_t);
static int watch_physio(int (*)(struct buf *), struct buf *,
	dev_t, int, void (*)(struct buf *), struct uio *);
#ifdef _LP64
static int watch_fuword64(const void *, uint64_t *);
static int watch_suword64(void *, uint64_t);
#endif

/*
 * The vector installed for watched processes; slot order must match
 * struct copyops.  The 64-bit fetch/store slots are NULL on ILP32.
 */
struct copyops watch_copyops = {
	watch_copyin,
	watch_xcopyin,
	watch_copyout,
	watch_xcopyout,
	watch_copyinstr,
	watch_copyoutstr,
	watch_fuword8,
	watch_fuword16,
	watch_fuword32,
#ifdef _LP64
	watch_fuword64,
#else
	NULL,
#endif
	watch_suword8,
	watch_suword16,
	watch_suword32,
#ifdef _LP64
	watch_suword64,
#else
	NULL,
#endif
	watch_physio
};
106 104
107 105 /*
108 106 * Map the 'rw' argument to a protection flag.
109 107 */
110 108 static int
111 109 rw_to_prot(enum seg_rw rw)
112 110 {
113 111 switch (rw) {
114 112 case S_EXEC:
115 113 return (PROT_EXEC);
116 114 case S_READ:
117 115 return (PROT_READ);
118 116 case S_WRITE:
119 117 return (PROT_WRITE);
120 118 default:
121 119 return (PROT_NONE); /* can't happen */
122 120 }
123 121 }
124 122
125 123 /*
126 124 * Map the 'rw' argument to an index into an array of exec/write/read things.
127 125 * The index follows the precedence order: exec .. write .. read
128 126 */
129 127 static int
130 128 rw_to_index(enum seg_rw rw)
131 129 {
132 130 switch (rw) {
133 131 default: /* default case "can't happen" */
134 132 case S_EXEC:
135 133 return (0);
136 134 case S_WRITE:
137 135 return (1);
138 136 case S_READ:
139 137 return (2);
140 138 }
141 139 }
142 140
/*
 * Map an index back to a seg_rw.  The fourth slot mirrors S_READ; it
 * corresponds to the extra S_READ entry in lwp_watch[] used when two
 * read traps occur on the same instruction (see do_watch_step()).
 */
static enum seg_rw S_rw[4] = {
	S_EXEC,
	S_WRITE,
	S_READ,
	S_READ,
};

/* Indices into the wp_kmap[]/wp_umap[] reference-count arrays. */
#define	X	0
#define	W	1
#define	R	2
/* Total outstanding mappings of a page across all access types. */
#define	sum(a)	(a[X] + a[W] + a[R])
157 155
/*
 * Common code for pr_mappage() and pr_unmappage().
 *
 * Walks the watched-page AVL tree for the range [addr, addr + size) in
 * the current process's address space and, for each watched page whose
 * protections would deny the requested access:
 *   mapin != 0:  bumps the per-page map reference count, sets WP_NOWATCH,
 *                and restores enough protection for the access to succeed;
 *   mapin == 0:  drops the reference count and, when it reaches zero,
 *                clears WP_NOWATCH so the watchpoint is re-armed.
 *
 * 'rw' selects the access type, 'kernel' non-zero means the access is on
 * behalf of the kernel only.  Returns the number of pages actually
 * remapped (non-zero means the caller must later call pr_unmappage()).
 */
static int
pr_do_mappage(caddr_t addr, size_t size, int mapin, enum seg_rw rw, int kernel)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	char *eaddr = addr + size;
	int prot_rw = rw_to_prot(rw);
	int xrw = rw_to_index(rw);
	int rv = 0;
	struct watched_page *pwp;
	struct watched_page tpw;
	avl_index_t where;
	uint_t prot;

	ASSERT(as != &kas);

startover:
	ASSERT(rv == 0);
	if (avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/*
	 * as->a_wpage can only be changed while the process is totally stopped.
	 * Don't grab p_lock here. Holding p_lock while grabbing the address
	 * space lock leads to deadlocks with the clock thread.
	 *
	 * p_maplock prevents simultaneous execution of this function. Under
	 * normal circumstances, holdwatch() will stop all other threads, so the
	 * lock isn't really needed. But there may be multiple threads within
	 * stop() when SWATCHOK is set, so we need to handle multiple threads
	 * at once. See holdwatch() for the details of this dance.
	 */

	mutex_enter(&p->p_maplock);

	/* Find the first watched page at or after the start of the range. */
	tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
		pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);

	for (; pwp != NULL && pwp->wp_vaddr < eaddr;
	    pwp = AVL_NEXT(&as->a_wpage, pwp)) {

		/*
		 * If the requested protection has not been
		 * removed, we need not remap this page.
		 */
		prot = pwp->wp_prot;
		if (kernel || (prot & PROT_USER))
			if (prot & prot_rw)
				continue;
		/*
		 * If the requested access does not exist in the page's
		 * original protections, we need not remap this page.
		 * If the page does not exist yet, we can't test it.
		 */
		if ((prot = pwp->wp_oprot) != 0) {
			if (!(kernel || (prot & PROT_USER)))
				continue;
			if (!(prot & prot_rw))
				continue;
		}

		if (mapin) {
			/*
			 * Before mapping the page in, ensure that
			 * all other lwps are held in the kernel.
			 */
			if (p->p_mapcnt == 0) {
				mutex_exit(&p->p_maplock);
				if (holdwatch() != 0) {
					/*
					 * We stopped in holdwatch().
					 * Start all over again because the
					 * watched page list may have changed.
					 */
					goto startover;
				}
				mutex_enter(&p->p_maplock);
			}
			p->p_mapcnt++;
		}

		addr = pwp->wp_vaddr;
		rv++;

		/*
		 * Compute the protections the page should carry, based on
		 * the outstanding map reference counts for each access type.
		 */
		prot = pwp->wp_prot;
		if (mapin) {
			if (kernel)
				pwp->wp_kmap[xrw]++;
			else
				pwp->wp_umap[xrw]++;
			pwp->wp_flags |= WP_NOWATCH;
			if (pwp->wp_kmap[X] + pwp->wp_umap[X])
				/* cannot have exec-only protection */
				prot |= PROT_READ|PROT_EXEC;
			if (pwp->wp_kmap[R] + pwp->wp_umap[R])
				prot |= PROT_READ;
			if (pwp->wp_kmap[W] + pwp->wp_umap[W])
				/* cannot have write-only protection */
				prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
			if (sum(pwp->wp_umap) == 0)
				prot &= ~PROT_USER;
#endif
		} else {
			ASSERT(pwp->wp_flags & WP_NOWATCH);
			if (kernel) {
				ASSERT(pwp->wp_kmap[xrw] != 0);
				--pwp->wp_kmap[xrw];
			} else {
				ASSERT(pwp->wp_umap[xrw] != 0);
				--pwp->wp_umap[xrw];
			}
			if (sum(pwp->wp_kmap) + sum(pwp->wp_umap) == 0)
				/* last reference: re-arm the watchpoint */
				pwp->wp_flags &= ~WP_NOWATCH;
			else {
				if (pwp->wp_kmap[X] + pwp->wp_umap[X])
					/* cannot have exec-only protection */
					prot |= PROT_READ|PROT_EXEC;
				if (pwp->wp_kmap[R] + pwp->wp_umap[R])
					prot |= PROT_READ;
				if (pwp->wp_kmap[W] + pwp->wp_umap[W])
					/* cannot have write-only protection */
					prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
				if (sum(pwp->wp_umap) == 0)
					prot &= ~PROT_USER;
#endif
			}
		}


		if (pwp->wp_oprot != 0) {	/* if page exists */
			struct seg *seg;
			uint_t oprot;
			int err, retrycnt = 0;

			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		retry:
			seg = as_segat(as, addr);
			ASSERT(seg != NULL);
			SEGOP_GETPROT(seg, addr, 0, &oprot);
			if (prot != oprot) {
				err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
				if (err == IE_RETRY) {
					ASSERT(retrycnt == 0);
					retrycnt++;
					goto retry;
				}
			}
			AS_LOCK_EXIT(as, &as->a_lock);
		}

		/*
		 * When all pages are mapped back to their normal state,
		 * continue the other lwps.
		 */
		if (!mapin) {
			ASSERT(p->p_mapcnt > 0);
			p->p_mapcnt--;
			if (p->p_mapcnt == 0) {
				mutex_exit(&p->p_maplock);
				mutex_enter(&p->p_lock);
				continuelwps(p);
				mutex_exit(&p->p_lock);
				mutex_enter(&p->p_maplock);
			}
		}
	}

	mutex_exit(&p->p_maplock);

	return (rv);
}
351 333
352 334 /*
353 335 * Restore the original page protections on an address range.
354 336 * If 'kernel' is non-zero, just do it for the kernel.
355 337 * pr_mappage() returns non-zero if it actually changed anything.
356 338 *
357 339 * pr_mappage() and pr_unmappage() must be executed in matched pairs,
358 340 * but pairs may be nested within other pairs. The reference counts
359 341 * sort it all out. See pr_do_mappage(), above.
360 342 */
361 343 static int
362 344 pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
363 345 {
364 346 return (pr_do_mappage(addr, size, 1, rw, kernel));
365 347 }
366 348
367 349 /*
368 350 * Set the modified page protections on a watched page.
369 351 * Inverse of pr_mappage().
370 352 * Needs to be called only if pr_mappage() returned non-zero.
371 353 */
372 354 static void
373 355 pr_unmappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
374 356 {
375 357 (void) pr_do_mappage(addr, size, 0, rw, kernel);
376 358 }
377 359
/*
 * Function called by an lwp after it resumes from stop().
 *
 * Walks the p_wprot list of watched pages whose protections were
 * deferred (WP_SETPROT) and pushes the watchpoint protections into the
 * segment layer.  Pages with no remaining watched areas are freed.
 * Clears p_wprot when done.
 */
void
setallwatch(void)
{
	proc_t *p = curproc;
	struct as *as = curproc->p_as;
	struct watched_page *pwp, *next;
	struct seg *seg;
	caddr_t vaddr;
	uint_t prot;
	int err, retrycnt;

	if (p->p_wprot == NULL)
		return;

	ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));

	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	pwp = p->p_wprot;
	while (pwp != NULL) {

		vaddr = pwp->wp_vaddr;
		retrycnt = 0;
	retry:
		ASSERT(pwp->wp_flags & WP_SETPROT);
		/*
		 * Skip pages that are gone from the address space or are
		 * currently mapped in (WP_NOWATCH) by pr_mappage().
		 */
		if ((seg = as_segat(as, vaddr)) != NULL &&
		    !(pwp->wp_flags & WP_NOWATCH)) {
			prot = pwp->wp_prot;
			err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
			if (err == IE_RETRY) {
				/* the segment asked us to retry once */
				ASSERT(retrycnt == 0);
				retrycnt++;
				goto retry;
			}
		}

		next = pwp->wp_list;

		if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
			/*
			 * No watched areas remain in this page.
			 * Free the watched_page structure.
			 */
			avl_remove(&as->a_wpage, pwp);
			kmem_free(pwp, sizeof (struct watched_page));
		} else {
			pwp->wp_flags &= ~WP_SETPROT;
		}

		pwp = next;
	}
	p->p_wprot = NULL;

	AS_LOCK_EXIT(as, &as->a_lock);
}
436 418
437 419
438 420
439 -/* Must be called with as lock held */
440 421 int
441 422 pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
442 423 {
443 424 register struct watched_page *pwp;
444 425 struct watched_page tpw;
445 426 uint_t prot;
446 427 int rv = 0;
447 428
448 429 switch (rw) {
449 430 case S_READ:
450 431 case S_WRITE:
451 432 case S_EXEC:
452 433 break;
453 434 default:
454 435 return (0);
455 436 }
456 437
457 438 /*
458 439 * as->a_wpage can only be modified while the process is totally
459 440 * stopped. We need, and should use, no locks here.
460 441 */
461 442 if (as != &kas && avl_numnodes(&as->a_wpage) != 0) {
462 443 tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
463 444 pwp = avl_find(&as->a_wpage, &tpw, NULL);
464 445 if (pwp != NULL) {
465 446 ASSERT(addr >= pwp->wp_vaddr &&
466 447 addr < pwp->wp_vaddr + PAGESIZE);
467 448 if (pwp->wp_oprot != 0) {
468 449 prot = pwp->wp_prot;
469 450 switch (rw) {
470 451 case S_READ:
471 452 rv = ((prot & (PROT_USER|PROT_READ))
472 453 != (PROT_USER|PROT_READ));
473 454 break;
474 455 case S_WRITE:
475 456 rv = ((prot & (PROT_USER|PROT_WRITE))
476 457 != (PROT_USER|PROT_WRITE));
477 458 break;
478 459 case S_EXEC:
479 460 rv = ((prot & (PROT_USER|PROT_EXEC))
480 461 != (PROT_USER|PROT_EXEC));
481 462 break;
482 463 default:
483 464 /* can't happen! */
484 465 break;
485 466 }
486 467 }
487 468 }
488 469 }
489 470
490 471 return (rv);
491 472 }
↓ open down ↓ |
42 lines elided |
↑ open up ↑ |
492 473
493 474
494 475 /*
495 476 * trap() calls here to determine if a fault is in a watched page.
496 477 * We return nonzero if this is true and the load/store would fail.
497 478 */
498 479 int
499 480 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
500 481 {
501 482 struct as *as = curproc->p_as;
502 - int rv;
503 483
504 484 if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
505 485 return (0);
506 486
507 - /* Grab the lock because of XHAT (see comment in pr_mappage()) */
508 - AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
509 - rv = pr_is_watchpage_as(addr, rw, as);
510 - AS_LOCK_EXIT(as, &as->a_lock);
511 -
512 - return (rv);
487 + return (pr_is_watchpage_as(addr, rw, as));
513 488 }
514 489
515 490
516 491
/*
 * trap() calls here to determine if a fault is a watchpoint.
 *
 * On a hit, returns a TRAP_[RWX]WATCH code, advances *paddr to the start
 * of the watched area, sets *pta if the area is WA_TRAPAFTER, and stores
 * the remaining watched length in *plen (if non-NULL).  Returns 0 when
 * no watched area of the right type overlaps [*paddr, *paddr + size).
 */
int
pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
	enum seg_rw rw)
{
	proc_t *p = curproc;
	caddr_t addr = *paddr;
	caddr_t eaddr = addr + size;
	register struct watched_area *pwa;
	struct watched_area twa;
	int rv = 0;
	int ta = 0;
	size_t len = 0;

	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		/* not an access type that watchpoints track */
		*pta = 0;
		return (0);
	}

	/*
	 * p->p_warea is protected by p->p_lock.
	 */
	mutex_enter(&p->p_lock);

	/* BEGIN CSTYLED */
	/*
	 * This loop is somewhat complicated because the fault region can span
	 * multiple watched areas. For example:
	 *
	 *           addr              eaddr
	 *            +-----------------+
	 *            | fault region    |
	 *    +-------+--------+   +----+----------+
	 *    | prot not right |   | prot correct  |
	 *    +----------------+   +---------------+
	 *    wa_vaddr    wa_eaddr
	 *                         wa_vaddr   wa_eaddr
	 *
	 * We start at the area greater than or equal to the starting address.
	 * As long as some portion of the fault region overlaps the current
	 * area, we continue checking permissions until we find an appropriate
	 * match.
	 */
	/* END CSTYLED */
	twa.wa_vaddr = addr;
	twa.wa_eaddr = eaddr;

	for (pwa = pr_find_watched_area(p, &twa, NULL);
	    pwa != NULL && eaddr > pwa->wa_vaddr && addr < pwa->wa_eaddr;
	    pwa = AVL_NEXT(&p->p_warea, pwa)) {

		switch (rw) {
		case S_READ:
			if (pwa->wa_flags & WA_READ)
				rv = TRAP_RWATCH;
			break;
		case S_WRITE:
			if (pwa->wa_flags & WA_WRITE)
				rv = TRAP_WWATCH;
			break;
		case S_EXEC:
			if (pwa->wa_flags & WA_EXEC)
				rv = TRAP_XWATCH;
			break;
		default:
			/* can't happen */
			break;
		}

		/*
		 * A watched area of the right type overlaps the fault:
		 * clamp addr to the start of the area, record the
		 * remaining watched length and trapafter flag, and stop.
		 * Otherwise fall through and check the next area.
		 */
		if (rv != 0) {
			if (addr < pwa->wa_vaddr)
				addr = pwa->wa_vaddr;
			len = pwa->wa_eaddr - addr;
			if (pwa->wa_flags & WA_TRAPAFTER)
				ta = 1;
			break;
		}
	}

	mutex_exit(&p->p_lock);

	*paddr = addr;
	*pta = ta;
	if (plen != NULL)
		*plen = len;
	return (rv);
}
615 590
/*
 * Set up to perform a single-step at user level for the
 * case of a trapafter watchpoint.  Called from trap().
 *
 * Records the watched range and trap code in the lwp's lwp_watch slot
 * for this access type, maps the page in (watchpoint disabled) for the
 * duration of the step, and requests a single-step via prstep().
 */
void
do_watch_step(caddr_t vaddr, size_t sz, enum seg_rw rw,
	int watchcode, greg_t pc)
{
	register klwp_t *lwp = ttolwp(curthread);
	struct lwp_watch *pw = &lwp->lwp_watch[rw_to_index(rw)];

	/*
	 * Check to see if we are already performing this special
	 * watchpoint single-step. We must not do pr_mappage() twice.
	 */

	/* special check for two read traps on the same instruction */
	if (rw == S_READ && pw->wpaddr != NULL &&
	    !(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize)) {
		ASSERT(lwp->lwp_watchtrap != 0);
		pw++;	/* use the extra S_READ struct */
	}

	if (pw->wpaddr != NULL) {
		/* already stepping over this range; just record the code */
		ASSERT(lwp->lwp_watchtrap != 0);
		ASSERT(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize);
		if (pw->wpcode == 0) {
			pw->wpcode = watchcode;
			pw->wppc = pc;
		}
	} else {
		/* first trap on this range: map in and arrange the step */
		int mapped = pr_mappage(vaddr, sz, rw, 0);
		prstep(lwp, 1);
		lwp->lwp_watchtrap = 1;
		pw->wpaddr = vaddr;
		pw->wpsize = sz;
		pw->wpcode = watchcode;
		pw->wpmapped = mapped;
		pw->wppc = pc;
	}
}
657 632
/*
 * Undo the effects of do_watch_step().
 * Called from trap() after the single-step is finished.
 * Also called from issig_forreal() and stop() with a NULL
 * argument to avoid having these things set more than once.
 *
 * Unmaps any pages mapped in by do_watch_step(), fills in 'sip' (when
 * non-NULL) with the pending SIGTRAP watchpoint info, and clears all
 * lwp_watch state.  Returns FLTWATCH if a watchpoint trap is pending,
 * otherwise 0.
 */
int
undo_watch_step(k_siginfo_t *sip)
{
	register klwp_t *lwp = ttolwp(curthread);
	int fault = 0;

	if (lwp->lwp_watchtrap) {
		struct lwp_watch *pw = lwp->lwp_watch;
		int i;

		/* all four slots, including the extra S_READ slot */
		for (i = 0; i < 4; i++, pw++) {
			if (pw->wpaddr == NULL)
				continue;
			if (pw->wpmapped)
				pr_unmappage(pw->wpaddr, pw->wpsize, S_rw[i],
				    0);
			if (pw->wpcode != 0) {
				if (sip != NULL) {
					sip->si_signo = SIGTRAP;
					sip->si_code = pw->wpcode;
					sip->si_addr = pw->wpaddr;
					sip->si_trapafter = 1;
					sip->si_pc = (caddr_t)pw->wppc;
				}
				fault = FLTWATCH;
				pw->wpcode = 0;
			}
			pw->wpaddr = NULL;
			pw->wpsize = 0;
			pw->wpmapped = 0;
		}
		lwp->lwp_watchtrap = 0;
	}

	return (fault);
}
700 675
/*
 * Handle a watchpoint that occurs while doing copyin()
 * or copyout() in a system call.
 * Return non-zero if the fault or signal is cleared
 * by a debugger while the lwp is stopped.
 */
static int
sys_watchpoint(caddr_t addr, int watchcode, int ta)
{
	extern greg_t getuserpc(void);	/* XXX header file */
	k_sigset_t smask;
	register proc_t *p = ttoproc(curthread);
	register klwp_t *lwp = ttolwp(curthread);
	register sigqueue_t *sqp;
	int rval;

	/* assert no locks are held */
	/* ASSERT(curthread->t_nlocks == 0); */

	/* build the SIGTRAP siginfo describing the watchpoint hit */
	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = watchcode;
	sqp->sq_info.si_addr = addr;
	sqp->sq_info.si_trapafter = ta;
	sqp->sq_info.si_pc = (caddr_t)getuserpc();

	mutex_enter(&p->p_lock);

	/* this will be tested and cleared by the caller */
	lwp->lwp_sysabort = 0;

	if (prismember(&p->p_fltmask, FLTWATCH)) {
		/* /proc fault tracing: stop and let a debugger intervene */
		lwp->lwp_curflt = (uchar_t)FLTWATCH;
		lwp->lwp_siginfo = sqp->sq_info;
		stop(PR_FAULTED, FLTWATCH);
		if (lwp->lwp_curflt == 0) {
			/* the debugger cleared the fault */
			mutex_exit(&p->p_lock);
			kmem_free(sqp, sizeof (sigqueue_t));
			return (1);
		}
		lwp->lwp_curflt = 0;
	}

	/*
	 * post the SIGTRAP signal.
	 * Block all other signals so we only stop showing SIGTRAP.
	 */
	if (signal_is_blocked(curthread, SIGTRAP) ||
	    sigismember(&p->p_ignore, SIGTRAP)) {
		/* SIGTRAP is blocked or ignored, forget the rest. */
		mutex_exit(&p->p_lock);
		kmem_free(sqp, sizeof (sigqueue_t));
		return (0);
	}
	sigdelq(p, curthread, SIGTRAP);
	sigaddqa(p, curthread, sqp);
	schedctl_finish_sigblock(curthread);
	smask = curthread->t_hold;
	sigfillset(&curthread->t_hold);
	sigdiffset(&curthread->t_hold, &cantmask);
	sigdelset(&curthread->t_hold, SIGTRAP);
	mutex_exit(&p->p_lock);

	/* take the signal; non-zero means it was cleared by a debugger */
	rval = ((ISSIG_FAST(curthread, lwp, p, FORREAL))? 0 : 1);

	/* restore the original signal mask */
	mutex_enter(&p->p_lock);
	curthread->t_hold = smask;
	mutex_exit(&p->p_lock);

	return (rval);
}
773 748
774 749 /*
775 750 * Wrappers for the copyin()/copyout() functions to deal
776 751 * with watchpoints that fire while in system calls.
777 752 */
778 753
779 754 static int
780 755 watch_xcopyin(const void *uaddr, void *kaddr, size_t count)
781 756 {
782 757 klwp_t *lwp = ttolwp(curthread);
783 758 caddr_t watch_uaddr = (caddr_t)uaddr;
784 759 caddr_t watch_kaddr = (caddr_t)kaddr;
785 760 int error = 0;
786 761 label_t ljb;
787 762 size_t part;
788 763 int mapped;
789 764
790 765 while (count && error == 0) {
791 766 int watchcode;
792 767 caddr_t vaddr;
793 768 size_t len;
794 769 int ta;
795 770
796 771 if ((part = PAGESIZE -
797 772 (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
798 773 part = count;
799 774
800 775 if (!pr_is_watchpage(watch_uaddr, S_READ))
801 776 watchcode = 0;
802 777 else {
803 778 vaddr = watch_uaddr;
804 779 watchcode = pr_is_watchpoint(&vaddr, &ta,
805 780 part, &len, S_READ);
806 781 if (watchcode && ta == 0)
807 782 part = vaddr - watch_uaddr;
808 783 }
809 784
810 785 /*
811 786 * Copy the initial part, up to a watched address, if any.
812 787 */
813 788 if (part != 0) {
814 789 mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
815 790 if (on_fault(&ljb))
816 791 error = EFAULT;
817 792 else
818 793 copyin_noerr(watch_uaddr, watch_kaddr, part);
819 794 no_fault();
820 795 if (mapped)
821 796 pr_unmappage(watch_uaddr, part, S_READ, 1);
822 797 watch_uaddr += part;
823 798 watch_kaddr += part;
824 799 count -= part;
825 800 }
826 801 /*
827 802 * If trapafter was specified, then copy through the
828 803 * watched area before taking the watchpoint trap.
829 804 */
830 805 while (count && watchcode && ta && len > part && error == 0) {
831 806 len -= part;
832 807 if ((part = PAGESIZE) > count)
833 808 part = count;
834 809 if (part > len)
835 810 part = len;
836 811 mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
837 812 if (on_fault(&ljb))
838 813 error = EFAULT;
839 814 else
840 815 copyin_noerr(watch_uaddr, watch_kaddr, part);
841 816 no_fault();
842 817 if (mapped)
843 818 pr_unmappage(watch_uaddr, part, S_READ, 1);
844 819 watch_uaddr += part;
845 820 watch_kaddr += part;
846 821 count -= part;
847 822 }
848 823
849 824 error:
850 825 /* if we hit a watched address, do the watchpoint logic */
851 826 if (watchcode &&
852 827 (!sys_watchpoint(vaddr, watchcode, ta) ||
853 828 lwp->lwp_sysabort)) {
854 829 lwp->lwp_sysabort = 0;
855 830 error = EFAULT;
856 831 break;
857 832 }
858 833 }
859 834
860 835 return (error);
861 836 }
862 837
863 838 static int
864 839 watch_copyin(const void *kaddr, void *uaddr, size_t count)
865 840 {
866 841 return (watch_xcopyin(kaddr, uaddr, count) ? -1 : 0);
867 842 }
868 843
869 844
870 845 static int
871 846 watch_xcopyout(const void *kaddr, void *uaddr, size_t count)
872 847 {
873 848 klwp_t *lwp = ttolwp(curthread);
874 849 caddr_t watch_uaddr = (caddr_t)uaddr;
875 850 caddr_t watch_kaddr = (caddr_t)kaddr;
876 851 int error = 0;
877 852 label_t ljb;
878 853
879 854 while (count && error == 0) {
880 855 int watchcode;
881 856 caddr_t vaddr;
882 857 size_t part;
883 858 size_t len;
884 859 int ta;
885 860 int mapped;
886 861
887 862 if ((part = PAGESIZE -
888 863 (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
889 864 part = count;
890 865
891 866 if (!pr_is_watchpage(watch_uaddr, S_WRITE))
892 867 watchcode = 0;
893 868 else {
894 869 vaddr = watch_uaddr;
895 870 watchcode = pr_is_watchpoint(&vaddr, &ta,
896 871 part, &len, S_WRITE);
897 872 if (watchcode) {
898 873 if (ta == 0)
899 874 part = vaddr - watch_uaddr;
900 875 else {
901 876 len += vaddr - watch_uaddr;
902 877 if (part > len)
903 878 part = len;
904 879 }
905 880 }
906 881 }
907 882
908 883 /*
909 884 * Copy the initial part, up to a watched address, if any.
910 885 */
911 886 if (part != 0) {
912 887 mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
913 888 if (on_fault(&ljb))
914 889 error = EFAULT;
915 890 else
916 891 copyout_noerr(watch_kaddr, watch_uaddr, part);
917 892 no_fault();
918 893 if (mapped)
919 894 pr_unmappage(watch_uaddr, part, S_WRITE, 1);
920 895 watch_uaddr += part;
921 896 watch_kaddr += part;
922 897 count -= part;
923 898 }
924 899
925 900 /*
926 901 * If trapafter was specified, then copy through the
927 902 * watched area before taking the watchpoint trap.
928 903 */
929 904 while (count && watchcode && ta && len > part && error == 0) {
930 905 len -= part;
931 906 if ((part = PAGESIZE) > count)
932 907 part = count;
933 908 if (part > len)
934 909 part = len;
935 910 mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
936 911 if (on_fault(&ljb))
937 912 error = EFAULT;
938 913 else
939 914 copyout_noerr(watch_kaddr, watch_uaddr, part);
940 915 no_fault();
941 916 if (mapped)
942 917 pr_unmappage(watch_uaddr, part, S_WRITE, 1);
943 918 watch_uaddr += part;
944 919 watch_kaddr += part;
945 920 count -= part;
946 921 }
947 922
948 923 /* if we hit a watched address, do the watchpoint logic */
949 924 if (watchcode &&
950 925 (!sys_watchpoint(vaddr, watchcode, ta) ||
951 926 lwp->lwp_sysabort)) {
952 927 lwp->lwp_sysabort = 0;
953 928 error = EFAULT;
954 929 break;
955 930 }
956 931 }
957 932
958 933 return (error);
959 934 }
960 935
961 936 static int
962 937 watch_copyout(const void *kaddr, void *uaddr, size_t count)
963 938 {
964 939 return (watch_xcopyout(kaddr, uaddr, count) ? -1 : 0);
965 940 }
966 941
/*
 * copyinstr with watchpoint handling: copy a NUL-terminated string of at
 * most 'maxlength' bytes from user address 'uaddr' to kernel address
 * 'kaddr', honoring watched pages.  On success stores the number of
 * bytes copied (including the NUL) in *lencopied when non-NULL.
 * Returns 0, EFAULT, or ENAMETOOLONG.
 */
static int
watch_copyinstr(
	const char *uaddr,
	char *kaddr,
	size_t maxlength,
	size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;
	int error = 0;
	label_t ljb;

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;
		size_t len;
		size_t size;
		int ta;
		int mapped;

		/*
		 * Limit each chunk to the rest of the current page.
		 * Here 'uaddr' itself advances, so this is the cursor.
		 */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage((caddr_t)uaddr, S_READ))
			watchcode = 0;
		else {
			vaddr = (caddr_t)uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_READ);
			if (watchcode) {
				if (ta == 0)
					/* stop short of the watched area */
					part = vaddr - uaddr;
				else {
					len += vaddr - uaddr;
					if (part > len)
						part = len;
				}
			}
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/* only the final chunk may report ENAMETOOLONG */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* done if the string ended or the copy came up short */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}
1080 1055
/*
 * Like copyoutstr(), but for threads with watchpoints in effect.
 * Copies a NUL-terminated string from kernel (kaddr) to user space
 * (uaddr), at most maxlength bytes, one page-bounded chunk at a time,
 * stopping at watched addresses to run the watchpoint logic.  If the
 * watched area has trapafter set, the copy proceeds through the area
 * before the trap is taken.
 * Returns 0, EFAULT, or ENAMETOOLONG; on return, *lencopied (if
 * non-NULL) holds the number of bytes copied including the NUL,
 * unless the failure was EFAULT.
 */
static int
watch_copyoutstr(
	const char *kaddr,
	char *uaddr,
	size_t maxlength,
	size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;		/* bytes of maxlength still uncopied */
	int error = 0;
	label_t ljb;

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;	/* bytes to attempt this iteration */
		size_t len;	/* remaining length of the watched area */
		size_t size;	/* bytes actually copied by _noerr copy */
		int ta;		/* non-zero if trapafter was specified */
		int mapped;

		/* limit this pass to the end of the current user page */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage(uaddr, S_WRITE)) {
			watchcode = 0;
		} else {
			vaddr = uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_WRITE);
			/* without trapafter, stop short of the watched addr */
			if (watchcode && ta == 0)
				part = vaddr - uaddr;
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/* not really too long if there is room left */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* done if the copy came up short or hit the terminating NUL */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}
1187 1162
/* Signature shared by the fuwordN_noerr() fetch primitives. */
typedef int (*fuword_func)(const void *, void *);

/*
 * Generic form of watch_fuword8(), watch_fuword16(), etc.
 * Fetches a 'size'-byte word from user address 'addr' into *dst via
 * 'func', honoring watchpoints: if the address is watched without
 * trapafter, the watchpoint logic runs first and the fetch is retried;
 * with trapafter, the fetch happens before the watchpoint logic.
 * Returns 0 on success, -1 on fault or watchpoint-induced abort.
 */
static int
watch_fuword(const void *addr, void *dst, fuword_func func, size_t size)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, size, NULL, S_READ);
		if (watchcode == 0 || ta != 0) {
			/* not watched, or trapafter: do the fetch now */
			mapped = pr_mappage((caddr_t)addr, size, S_READ, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				(*func)(addr, dst);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, size, S_READ, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* watched without trapafter: retry the fetch after the trap */
	}

	return (rv);
}
1231 1206
/* Fetch an 8-bit word from user space, honoring watchpoints. */
static int
watch_fuword8(const void *addr, uint8_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword8_noerr,
	    sizeof (*dst)));
}
1238 1213
/* Fetch a 16-bit word from user space, honoring watchpoints. */
static int
watch_fuword16(const void *addr, uint16_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword16_noerr,
	    sizeof (*dst)));
}
1245 1220
/* Fetch a 32-bit word from user space, honoring watchpoints. */
static int
watch_fuword32(const void *addr, uint32_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword32_noerr,
	    sizeof (*dst)));
}
1252 1227
#ifdef _LP64
/* Fetch a 64-bit word from user space, honoring watchpoints. */
static int
watch_fuword64(const void *addr, uint64_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword64_noerr,
	    sizeof (*dst)));
}
#endif
1261 1236
1262 1237
/*
 * Store an 8-bit value to user space, honoring watchpoints.
 * If the address is watched without trapafter, the watchpoint logic
 * runs first and the store is retried; with trapafter, the store
 * happens before the watchpoint logic.
 * Returns 0 on success, -1 on fault or watchpoint-induced abort.
 */
static int
watch_suword8(void *addr, uint8_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			/* not watched, or trapafter: do the store now */
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword8_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* watched without trapafter: retry the store after the trap */
	}

	return (rv);
}
1304 1279
/*
 * Store a 16-bit value to user space, honoring watchpoints.
 * Same retry/trapafter protocol as watch_suword8().
 * Returns 0 on success, -1 on fault or watchpoint-induced abort.
 */
static int
watch_suword16(void *addr, uint16_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			/* not watched, or trapafter: do the store now */
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword16_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* watched without trapafter: retry the store after the trap */
	}

	return (rv);
}
1346 1321
/*
 * Store a 32-bit value to user space, honoring watchpoints.
 * Same retry/trapafter protocol as watch_suword8().
 * Returns 0 on success, -1 on fault or watchpoint-induced abort.
 */
static int
watch_suword32(void *addr, uint32_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			/* not watched, or trapafter: do the store now */
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword32_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* watched without trapafter: retry the store after the trap */
	}

	return (rv);
}
1388 1363
#ifdef _LP64
/*
 * Store a 64-bit value to user space, honoring watchpoints.
 * Same retry/trapafter protocol as watch_suword8().
 * Returns 0 on success, -1 on fault or watchpoint-induced abort.
 */
static int
watch_suword64(void *addr, uint64_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			/* not watched, or trapafter: do the store now */
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword64_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* watched without trapafter: retry the store after the trap */
	}

	return (rv);
}
#endif /* _LP64 */
1432 1407
1433 1408 /*
1434 1409 * Check for watched addresses in the given address space.
1435 1410 * Return 1 if this is true, otherwise 0.
1436 1411 */
1437 1412 static int
1438 1413 pr_is_watched(caddr_t base, size_t len, int rw)
1439 1414 {
1440 1415 caddr_t saddr = (caddr_t)((uintptr_t)base & (uintptr_t)PAGEMASK);
1441 1416 caddr_t eaddr = base + len;
1442 1417 caddr_t paddr;
1443 1418
1444 1419 for (paddr = saddr; paddr < eaddr; paddr += PAGESIZE) {
1445 1420 if (pr_is_watchpage(paddr, rw))
1446 1421 return (1);
1447 1422 }
1448 1423
1449 1424 return (0);
1450 1425 }
1451 1426
/*
 * Wrapper for the physio() function.
 * Splits one uio operation with multiple iovecs into uio operations with
 * only one iovecs to do the watchpoint handling separately for each iovecs.
 * Returns 0 on success, or an errno; returns EFAULT if the watchpoint
 * logic aborts the operation.
 */
static int
watch_physio(int (*strat)(struct buf *), struct buf *bp, dev_t dev,
	int rw, void (*mincnt)(struct buf *), struct uio *uio)
{
	struct uio auio;	/* single-iovec clone of *uio */
	struct iovec *iov;
	caddr_t base;
	size_t len;
	int seg_rw;
	int error = 0;

	/* kernel-space I/O cannot hit user watchpoints */
	if (uio->uio_segflg == UIO_SYSSPACE)
		return (default_physio(strat, bp, dev, rw, mincnt, uio));

	/* a device read writes user memory, and vice versa */
	seg_rw = (rw == B_READ) ? S_WRITE : S_READ;

	while (uio->uio_iovcnt > 0) {
		if (uio->uio_resid == 0) {
			/*
			 * Make sure to return the uio structure with the
			 * same values as default_physio() does.
			 */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}

		iov = uio->uio_iov;
		len = MIN(iov->iov_len, uio->uio_resid);

		/* build a one-iovec uio for this piece of the transfer */
		auio.uio_iovcnt = 1;
		auio.uio_iov = iov;
		auio.uio_resid = len;
		auio.uio_loffset = uio->uio_loffset;
		auio.uio_llimit = uio->uio_llimit;
		auio.uio_fmode = uio->uio_fmode;
		auio.uio_extflg = uio->uio_extflg;
		auio.uio_segflg = uio->uio_segflg;

		base = iov->iov_base;

		if (!pr_is_watched(base, len, seg_rw)) {
			/*
			 * The given memory references don't cover a
			 * watched page.
			 */
			error = default_physio(strat, bp, dev, rw, mincnt,
			    &auio);

			/* Update uio with values from auio. */
			len -= auio.uio_resid;
			uio->uio_resid -= len;
			uio->uio_loffset += len;

			/*
			 * Return if an error occurred or not all data
			 * was copied.
			 */
			if (auio.uio_resid || error)
				break;
			uio->uio_iov++;
			uio->uio_iovcnt--;
		} else {
			int mapped, watchcode, ta;
			caddr_t vaddr = base;
			klwp_t *lwp = ttolwp(curthread);

			watchcode = pr_is_watchpoint(&vaddr, &ta, len,
			    NULL, seg_rw);

			if (watchcode == 0 || ta != 0) {
				/*
				 * Do the io if the given memory references
				 * don't cover a watched area (watchcode=0)
				 * or if WA_TRAPAFTER was specified.
				 */
				mapped = pr_mappage(base, len, seg_rw, 1);
				error = default_physio(strat, bp, dev, rw,
				    mincnt, &auio);
				if (mapped)
					pr_unmappage(base, len, seg_rw, 1);

				len -= auio.uio_resid;
				uio->uio_resid -= len;
				uio->uio_loffset += len;
			}

			/*
			 * If we hit a watched address, do the watchpoint logic.
			 */
			if (watchcode &&
			    (!sys_watchpoint(vaddr, watchcode, ta) ||
			    lwp->lwp_sysabort)) {
				lwp->lwp_sysabort = 0;
				return (EFAULT);
			}

			/*
			 * Check for errors from default_physio().
			 */
			if (watchcode == 0 || ta != 0) {
				if (auio.uio_resid || error)
					break;
				uio->uio_iov++;
				uio->uio_iovcnt--;
			}
		}
	}

	return (error);
}
1568 1543
1569 1544 int
1570 1545 wa_compare(const void *a, const void *b)
1571 1546 {
1572 1547 const watched_area_t *pa = a;
1573 1548 const watched_area_t *pb = b;
1574 1549
1575 1550 if (pa->wa_vaddr < pb->wa_vaddr)
1576 1551 return (-1);
1577 1552 else if (pa->wa_vaddr > pb->wa_vaddr)
1578 1553 return (1);
1579 1554 else
1580 1555 return (0);
1581 1556 }
1582 1557
1583 1558 int
1584 1559 wp_compare(const void *a, const void *b)
1585 1560 {
1586 1561 const watched_page_t *pa = a;
1587 1562 const watched_page_t *pb = b;
1588 1563
1589 1564 if (pa->wp_vaddr < pb->wp_vaddr)
1590 1565 return (-1);
1591 1566 else if (pa->wp_vaddr > pb->wp_vaddr)
1592 1567 return (1);
1593 1568 else
1594 1569 return (0);
1595 1570 }
1596 1571
/*
 * Given an address range, finds the first watched area which overlaps some or
 * all of the range.  Returns NULL if no area overlaps.  If 'where' is
 * non-NULL it is set to the AVL insertion point for pwa, for callers
 * that intend to insert a new area when none overlaps.
 */
watched_area_t *
pr_find_watched_area(proc_t *p, watched_area_t *pwa, avl_index_t *where)
{
	caddr_t vaddr = pwa->wa_vaddr;
	caddr_t eaddr = pwa->wa_eaddr;
	watched_area_t *wap;
	avl_index_t real_where;

	/* First, check if there is an exact match. */
	wap = avl_find(&p->p_warea, pwa, &real_where);


	/* Check to see if we overlap with the previous area. */
	if (wap == NULL) {
		wap = avl_nearest(&p->p_warea, real_where, AVL_BEFORE);
		/* reject the neighbor unless [vaddr, eaddr) overlaps it */
		if (wap != NULL &&
		    (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
			wap = NULL;
	}

	/* Try the next area. */
	if (wap == NULL) {
		wap = avl_nearest(&p->p_warea, real_where, AVL_AFTER);
		if (wap != NULL &&
		    (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
			wap = NULL;
	}

	if (where)
		*where = real_where;

	return (wap);
}
1634 1609
/*
 * Enable watchpoint handling for thread t: mark it and install the
 * watchpoint-aware copyops so user-memory accesses go through the
 * watch_* routines in this file.
 */
void
watch_enable(kthread_id_t t)
{
	t->t_proc_flag |= TP_WATCHPT;
	install_copyops(t, &watch_copyops);
}
1641 1616
/*
 * Disable watchpoint handling for thread t: clear the flag and restore
 * the default copyops.
 */
void
watch_disable(kthread_id_t t)
{
	t->t_proc_flag &= ~TP_WATCHPT;
	remove_copyops(t);
}
1648 1623
1649 1624 int
1650 1625 copyin_nowatch(const void *uaddr, void *kaddr, size_t len)
1651 1626 {
1652 1627 int watched, ret;
1653 1628
1654 1629 watched = watch_disable_addr(uaddr, len, S_READ);
1655 1630 ret = copyin(uaddr, kaddr, len);
1656 1631 if (watched)
1657 1632 watch_enable_addr(uaddr, len, S_READ);
1658 1633
1659 1634 return (ret);
1660 1635 }
1661 1636
1662 1637 int
1663 1638 copyout_nowatch(const void *kaddr, void *uaddr, size_t len)
1664 1639 {
1665 1640 int watched, ret;
1666 1641
1667 1642 watched = watch_disable_addr(uaddr, len, S_WRITE);
1668 1643 ret = copyout(kaddr, uaddr, len);
1669 1644 if (watched)
1670 1645 watch_enable_addr(uaddr, len, S_WRITE);
1671 1646
1672 1647 return (ret);
1673 1648 }
1674 1649
1675 1650 #ifdef _LP64
1676 1651 int
1677 1652 fuword64_nowatch(const void *addr, uint64_t *value)
1678 1653 {
1679 1654 int watched, ret;
1680 1655
1681 1656 watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1682 1657 ret = fuword64(addr, value);
1683 1658 if (watched)
1684 1659 watch_enable_addr(addr, sizeof (*value), S_READ);
1685 1660
1686 1661 return (ret);
1687 1662 }
1688 1663 #endif
1689 1664
1690 1665 int
1691 1666 fuword32_nowatch(const void *addr, uint32_t *value)
1692 1667 {
1693 1668 int watched, ret;
1694 1669
1695 1670 watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1696 1671 ret = fuword32(addr, value);
1697 1672 if (watched)
1698 1673 watch_enable_addr(addr, sizeof (*value), S_READ);
1699 1674
1700 1675 return (ret);
1701 1676 }
1702 1677
1703 1678 #ifdef _LP64
1704 1679 int
1705 1680 suword64_nowatch(void *addr, uint64_t value)
1706 1681 {
1707 1682 int watched, ret;
1708 1683
1709 1684 watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1710 1685 ret = suword64(addr, value);
1711 1686 if (watched)
1712 1687 watch_enable_addr(addr, sizeof (value), S_WRITE);
1713 1688
1714 1689 return (ret);
1715 1690 }
1716 1691 #endif
1717 1692
1718 1693 int
1719 1694 suword32_nowatch(void *addr, uint32_t value)
1720 1695 {
1721 1696 int watched, ret;
1722 1697
1723 1698 watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1724 1699 ret = suword32(addr, value);
1725 1700 if (watched)
1726 1701 watch_enable_addr(addr, sizeof (value), S_WRITE);
1727 1702
1728 1703 return (ret);
1729 1704 }
1730 1705
1731 1706 int
1732 1707 watch_disable_addr(const void *addr, size_t len, enum seg_rw rw)
1733 1708 {
1734 1709 if (pr_watch_active(curproc))
1735 1710 return (pr_mappage((caddr_t)addr, len, rw, 1));
1736 1711 return (0);
1737 1712 }
1738 1713
1739 1714 void
1740 1715 watch_enable_addr(const void *addr, size_t len, enum seg_rw rw)
1741 1716 {
1742 1717 if (pr_watch_active(curproc))
1743 1718 pr_unmappage((caddr_t)addr, len, rw, 1);
1744 1719 }
↓ open down ↓ |
1222 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX