5042 stop using deprecated atomic functions
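Note: this change replaces the private, SPARC-only casptr() with atomic_cas_ptr() from <sys/atomic.h>. Both return the previous value of the target, so the swap took effect exactly when the return value equals the expected value. A minimal userland sketch of the call shape, assuming an illumos build environment with the atomic_ops(3C) interfaces:

	#include <atomic.h>
	#include <stdio.h>

	int
	main(void)
	{
		void *owner = NULL;
		void *me = &owner;	/* any non-NULL token will do */

		/*
		 * atomic_cas_ptr() returns the old value of *target; the
		 * swap happened iff that equals the expected value (NULL
		 * here).  The removed casptr() behaved the same way but
		 * was never a public, architecture-neutral interface.
		 */
		void *old = atomic_cas_ptr(&owner, NULL, me);

		(void) printf("acquired: %s\n", old == NULL ? "yes" : "no");
		return (0);
	}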
--- old/usr/src/uts/sun4/os/prom_subr.c
+++ new/usr/src/uts/sun4/os/prom_subr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 -#pragma ident "%Z%%M% %I% %E% SMI"
27 -
28 26 #include <sys/types.h>
29 27 #include <sys/param.h>
30 28 #include <sys/cmn_err.h>
31 29 #include <sys/mutex.h>
32 30 #include <sys/systm.h>
33 31 #include <sys/sysmacros.h>
34 32 #include <sys/machsystm.h>
35 33 #include <sys/archsystm.h>
36 34 #include <sys/x_call.h>
37 35 #include <sys/promif.h>
38 36 #include <sys/prom_isa.h>
39 37 #include <sys/privregs.h>
40 38 #include <sys/vmem.h>
41 39 #include <sys/atomic.h>
42 40 #include <sys/panic.h>
43 41 #include <sys/rwlock.h>
44 42 #include <sys/reboot.h>
45 43 #include <sys/kdi.h>
46 44 #include <sys/kdi_machimpl.h>
47 45
48 46 /*
49 47 * We are called with a pointer to a cell-sized argument array.
50 48 * The service name (the first element of the argument array) is
51 49 * the name of the callback being invoked. When called, we are
  52   50  * running on the firmware's trap table as a trusted subroutine
53 51 * of the firmware.
54 52 *
55 53 * We define entry points to allow callback handlers to be dynamically
56 54 * added and removed, to support obpsym, which is a separate module
57 55 * and can be dynamically loaded and unloaded and registers its
58 56 * callback handlers dynamically.
59 57 *
  60   58  * Note: The actual callback handler we register is the assembly
  61   59  * language glue, callback_handler, which takes care of switching
  62   60  * from a 64-bit stack and environment to a 32-bit stack and
  63   61  * environment, and back again if the callback handler returns.
  64   62  * callback_handler calls vx_handler to process the callback.
65 63 */
66 64
67 65 static kmutex_t vx_cmd_lock; /* protect vx_cmd table */
68 66
69 67 #define VX_CMD_MAX 10
70 68 #define ENDADDR(a) &a[sizeof (a) / sizeof (a[0])]
71 69 #define vx_cmd_end ((struct vx_cmd *)(ENDADDR(vx_cmd)))
72 70
73 71 static struct vx_cmd {
74 72 char *service; /* Service name */
  75   73  	int	take_tba;	/* If non-zero we take over the tba */
76 74 void (*func)(cell_t *argument_array);
77 75 } vx_cmd[VX_CMD_MAX+1];
78 76
79 77 void
80 78 init_vx_handler(void)
81 79 {
82 80 extern int callback_handler(cell_t *arg_array);
83 81
84 82 /*
85 83 * initialize the lock protecting additions and deletions from
86 84 * the vx_cmd table. At callback time we don't need to grab
87 85 * this lock. Callback handlers do not need to modify the
88 86 * callback handler table.
89 87 */
90 88 mutex_init(&vx_cmd_lock, NULL, MUTEX_DEFAULT, NULL);
91 89
92 90 /*
93 91 * Tell OBP about our callback handler.
94 92 */
95 93 (void) prom_set_callback((void *)callback_handler);
96 94 }
97 95
98 96 /*
99 97 * Add a kernel callback handler to the kernel's list.
100 98 * The table is static, so if you add a callback handler, increase
101 99 * the value of VX_CMD_MAX. Find the first empty slot and use it.
102 100 */
103 101 void
104 102 add_vx_handler(char *name, int flag, void (*func)(cell_t *))
105 103 {
106 104 struct vx_cmd *vp;
107 105
108 106 mutex_enter(&vx_cmd_lock);
109 107 for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
110 108 if (vp->service == NULL) {
111 109 vp->service = name;
112 110 vp->take_tba = flag;
113 111 vp->func = func;
114 112 mutex_exit(&vx_cmd_lock);
115 113 return;
116 114 }
117 115 }
118 116 mutex_exit(&vx_cmd_lock);
119 117
120 118 #ifdef DEBUG
121 119
122 120 /*
 123  121  * Running out of callback table entries shouldn't happen;
 124  122  * increase VX_CMD_MAX if it does.
125 123 */
126 124 cmn_err(CE_PANIC, "add_vx_handler <%s>", name);
127 125 /* NOTREACHED */
128 126
129 127 #else /* DEBUG */
130 128
 131  129  	cmn_err(CE_WARN, "add_vx_handler: Can't add callback handler <%s>",
132 130 name);
133 131
134 132 #endif /* DEBUG */
135 133
136 134 }
137 135
138 136 /*
139 137 * Remove a vx_handler function -- find the name string in the table,
140 138 * and clear it.
141 139 */
142 140 void
143 141 remove_vx_handler(char *name)
144 142 {
145 143 struct vx_cmd *vp;
146 144
147 145 mutex_enter(&vx_cmd_lock);
148 146 for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
149 147 if (vp->service == NULL)
150 148 continue;
151 149 if (strcmp(vp->service, name) != 0)
152 150 continue;
153 151 vp->service = 0;
154 152 vp->take_tba = 0;
155 153 vp->func = 0;
156 154 mutex_exit(&vx_cmd_lock);
157 155 return;
158 156 }
159 157 mutex_exit(&vx_cmd_lock);
160 158 cmn_err(CE_WARN, "remove_vx_handler: <%s> not found", name);
161 159 }
162 160
163 161 int
164 162 vx_handler(cell_t *argument_array)
165 163 {
166 164 char *name;
167 165 struct vx_cmd *vp;
168 166 void *old_tba;
169 167
170 168 name = p1275_cell2ptr(*argument_array);
171 169
172 170 for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
173 171 if (vp->service == (char *)0)
174 172 continue;
175 173 if (strcmp(vp->service, name) != 0)
176 174 continue;
177 175 if (vp->take_tba != 0) {
178 176 reestablish_curthread();
179 177 if (tba_taken_over != 0)
180 178 old_tba = set_tba((void *)&trap_table);
181 179 }
182 180 vp->func(argument_array);
183 181 if ((vp->take_tba != 0) && (tba_taken_over != 0))
184 182 (void) set_tba(old_tba);
185 183 return (0); /* Service name was known */
186 184 }
187 185
188 186 return (-1); /* Service name unknown */
189 187 }
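For context, a client module such as obpsym registers and removes its handlers through the entry points above. A hypothetical sketch of that usage (the "sym-to-value" service name and sym_callback() are illustrative, not taken from this change):

	/*
	 * Hypothetical registration sketch.  A loadable module adds its
	 * handler at attach time and must remove it before unloading so
	 * vx_handler() never calls into unmapped text.
	 */
	static void
	sym_callback(cell_t *argument_array)
	{
		/* decode cells with p1275_cell2ptr() etc., then do the work */
	}

	static void
	example_attach(void)
	{
		/* flag = 0: don't take over the trap table for this handler */
		add_vx_handler("sym-to-value", 0, sym_callback);
	}

	static void
	example_detach(void)
	{
		remove_vx_handler("sym-to-value");
	}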
190 188
191 189 /*
192 190 * PROM Locking Primitives
193 191 *
194 192 * These routines are called immediately before and immediately after calling
195 193 * into the firmware. The firmware is single-threaded and assumes that the
196 194 * kernel will implement locking to prevent simultaneous service calls. In
197 195 * addition, some service calls (particularly character rendering) can be
198 196 * slow, so we would like to sleep if we cannot acquire the lock to allow the
199 197 * caller's CPU to continue to perform useful work in the interim. Service
200 198 * routines may also be called early in boot as part of slave CPU startup
201 199 * when mutexes and cvs are not yet available (i.e. they are still running on
202 200 * the prom's TLB handlers and cannot touch curthread). Therefore, these
203 201 * routines must reduce to a simple compare-and-swap spin lock when necessary.
204 202 * Finally, kernel code may wish to acquire the firmware lock before executing
205 203 * a block of code that includes service calls, so we also allow the firmware
206 204 * lock to be acquired recursively by the owning CPU after disabling preemption.
207 205 *
208 206 * To meet these constraints, the lock itself is implemented as a compare-and-
209 207 * swap spin lock on the global prom_cpu pointer. We implement recursion by
210 208 * atomically incrementing the integer prom_holdcnt after acquiring the lock.
211 209 * If the current CPU is an "adult" (determined by testing cpu_m.mutex_ready),
212 210 * we disable preemption before acquiring the lock and leave it disabled once
213 211 * the lock is held. The kern_postprom() routine then enables preemption if
214 212 * we drop the lock and prom_holdcnt returns to zero. If the current CPU is
215 213 * an adult and the lock is held by another adult CPU, we can safely sleep
216 214 * until the lock is released. To do so, we acquire the adaptive prom_mutex
217 215 * and then sleep on prom_cv. Therefore, service routines must not be called
218 216 * from above LOCK_LEVEL on any adult CPU. Finally, if recursive entry is
219 217 * attempted on an adult CPU, we must also verify that curthread matches the
220 218 * saved prom_thread (the original owner) to ensure that low-level interrupt
221 219 * threads do not step on other threads running on the same CPU.
222 220 */
223 221
224 222 static cpu_t *volatile prom_cpu;
225 223 static kthread_t *volatile prom_thread;
226 224 static uint32_t prom_holdcnt;
227 225 static kmutex_t prom_mutex;
228 226 static kcondvar_t prom_cv;
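The block comment above describes the shape of the lock: a compare-and-swap spin on an owner pointer plus an atomically maintained hold count for recursion. A stripped-down userland sketch of that shape using the atomic_ops(3C) interfaces (the preemption control, cv sleep path, and owner-thread check of the kernel version are deliberately omitted):

	#include <atomic.h>
	#include <stdint.h>

	static void *volatile lock_owner;	/* plays the role of prom_cpu */
	static volatile uint32_t lock_holds;	/* plays the role of prom_holdcnt */

	static void
	sketch_lock(void *self)
	{
		void *owner;

		/* Spin until we install ourselves or find we already own it. */
		for (;;) {
			owner = atomic_cas_ptr(&lock_owner, NULL, self);
			if (owner == NULL || owner == self)
				break;
		}
		atomic_inc_32(&lock_holds);
	}

	static void
	sketch_unlock(void)
	{
		if (atomic_dec_32_nv(&lock_holds) != 0)
			return;			/* still held recursively */
		membar_producer();		/* order stores before release */
		lock_owner = NULL;
	}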
229 227
230 228 /*
231 229 * The debugger uses PROM services, and is thus unable to run if any of the
232 230 * CPUs on the system are executing in the PROM at the time of debugger entry.
233 231 * If a CPU is determined to be in the PROM when the debugger is entered,
 234  232  * prom_exit_enter_debugger will be set, thus triggering a programmed debugger
235 233 * entry when the given CPU returns from the PROM. That CPU is then released by
236 234 * the debugger, and is allowed to complete PROM-related work.
237 235 */
238 236 int prom_exit_enter_debugger;
239 237
240 238 void
241 239 kern_preprom(void)
242 240 {
243 241 for (;;) {
244 242 /*
245 243 * Load the current CPU pointer and examine the mutex_ready bit.
246 244 * It doesn't matter if we are preempted here because we are
247 245 * only trying to determine if we are in the *set* of mutex
248 246 * ready CPUs. We cannot disable preemption until we confirm
249 247 * that we are running on a CPU in this set, since a call to
250 248 * kpreempt_disable() requires access to curthread.
251 249 */
252 250 processorid_t cpuid = getprocessorid();
253 251 cpu_t *cp = cpu[cpuid];
254 252 cpu_t *prcp;
255 253
256 254 if (panicstr)
257 255 return; /* just return if we are currently panicking */
258 256
259 257 if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
260 258 /*
 261  259  * Disable preemption and reload the current CPU. We
262 260 * can't move from a mutex_ready cpu to a non-ready cpu
263 261 * so we don't need to re-check cp->cpu_m.mutex_ready.
264 262 */
265 263 kpreempt_disable();
266 264 cp = CPU;
267 265 ASSERT(cp->cpu_m.mutex_ready);
268 266
269 267 /*
270 268 * Try the lock. If we don't get the lock, re-enable
271 269 * preemption and see if we should sleep. If we are
272 270 * already the lock holder, remove the effect of the
273 271 * previous kpreempt_disable() before returning since
274 272 * preemption was disabled by an earlier kern_preprom.
275 273 */
276 - prcp = casptr((void *)&prom_cpu, NULL, cp);
274 + prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
277 275 if (prcp == NULL ||
278 276 (prcp == cp && prom_thread == curthread)) {
279 277 if (prcp == cp)
280 278 kpreempt_enable();
281 279 break;
282 280 }
283 281
284 282 kpreempt_enable();
285 283
286 284 /*
287 285 * We have to be very careful here since both prom_cpu
288 286 * and prcp->cpu_m.mutex_ready can be changed at any
289 287 * time by a non mutex_ready cpu holding the lock.
290 288 * If the owner is mutex_ready, holding prom_mutex
291 289 * prevents kern_postprom() from completing. If the
292 290 * owner isn't mutex_ready, we only know it will clear
293 291 * prom_cpu before changing cpu_m.mutex_ready, so we
294 292 * issue a membar after checking mutex_ready and then
295 293 * re-verify that prom_cpu is still held by the same
296 294 * cpu before actually proceeding to cv_wait().
297 295 */
298 296 mutex_enter(&prom_mutex);
299 297 prcp = prom_cpu;
300 298 if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
301 299 membar_consumer();
302 300 if (prcp == prom_cpu)
303 301 cv_wait(&prom_cv, &prom_mutex);
304 302 }
305 303 mutex_exit(&prom_mutex);
306 304
307 305 } else {
308 306 /*
309 307 * If we are not yet mutex_ready, just attempt to grab
310 308 * the lock. If we get it or already hold it, break.
311 309 */
312 310 ASSERT(getpil() == PIL_MAX);
313 - prcp = casptr((void *)&prom_cpu, NULL, cp);
311 + prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
314 312 if (prcp == NULL || prcp == cp)
315 313 break;
316 314 }
317 315 }
318 316
319 317 /*
320 318 * We now hold the prom_cpu lock. Increment the hold count by one
321 319 * and assert our current state before returning to the caller.
322 320 */
323 321 atomic_add_32(&prom_holdcnt, 1);
324 322 ASSERT(prom_holdcnt >= 1);
325 323 prom_thread = curthread;
326 324 }
327 325
328 326 /*
329 327 * Drop the prom lock if it is held by the current CPU. If the lock is held
330 328 * recursively, return without clearing prom_cpu. If the hold count is now
331 329 * zero, clear prom_cpu and cv_signal any waiting CPU.
332 330 */
333 331 void
334 332 kern_postprom(void)
335 333 {
336 334 processorid_t cpuid = getprocessorid();
337 335 cpu_t *cp = cpu[cpuid];
338 336
339 337 if (panicstr)
340 338 return; /* do not modify lock further if we have panicked */
341 339
342 340 if (prom_cpu != cp)
343 341 panic("kern_postprom: not owner, cp=%p owner=%p",
344 342 (void *)cp, (void *)prom_cpu);
345 343
346 344 if (prom_holdcnt == 0)
347 345 panic("kern_postprom: prom_holdcnt == 0, owner=%p",
348 346 (void *)prom_cpu);
349 347
350 348 if (atomic_add_32_nv(&prom_holdcnt, -1) != 0)
351 349 return; /* prom lock is held recursively by this CPU */
352 350
353 351 if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
354 352 kmdb_enter();
355 353
356 354 prom_thread = NULL;
357 355 membar_producer();
358 356
359 357 prom_cpu = NULL;
360 358 membar_producer();
361 359
362 360 if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
363 361 mutex_enter(&prom_mutex);
364 362 cv_signal(&prom_cv);
365 363 mutex_exit(&prom_mutex);
366 364 kpreempt_enable();
367 365 }
368 366 }
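Taken together, every trip into the firmware is bracketed by this pair, as the comment above the lock variables describes. A hypothetical sketch of a caller (prom_printf() stands in for any service call):

	/*
	 * Hypothetical calling pattern: the lock routines bracket one or
	 * more firmware service calls and may nest on the owning thread.
	 */
	static void
	example_prom_call(void)
	{
		kern_preprom();		/* acquire, possibly recursively */
		prom_printf("hello from the kernel\n");
		kern_postprom();	/* last release re-enables preemption */
	}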
369 367
370 368 /*
371 369 * If the frame buffer device is busy, briefly capture the other CPUs so that
372 370 * another CPU executing code to manipulate the device does not execute at the
373 371 * same time we are rendering characters. Refer to the comments and code in
374 372 * common/os/console.c for more information on these callbacks.
375 373 *
376 374 * Notice that we explicitly acquire the PROM lock using kern_preprom() prior
377 375 * to idling other CPUs. The idling mechanism will cross-trap the other CPUs
378 376 * and have them spin at MAX(%pil, XCALL_PIL), so we must be sure that none of
379 377 * them are holding the PROM lock before we idle them and then call into the
380 378 * PROM routines that render characters to the frame buffer.
381 379 */
382 380 int
383 381 console_enter(int busy)
384 382 {
385 383 int s = 0;
386 384
387 385 if (busy && panicstr == NULL) {
388 386 kern_preprom();
389 387 s = splhi();
390 388 idle_other_cpus();
391 389 }
392 390
393 391 return (s);
394 392 }
395 393
396 394 void
397 395 console_exit(int busy, int spl)
398 396 {
399 397 if (busy && panicstr == NULL) {
400 398 resume_other_cpus();
401 399 splx(spl);
402 400 kern_postprom();
403 401 }
404 402 }
405 403
406 404 /*
407 405 * This routine is a special form of pause_cpus(). It ensures that
408 406 * prom functions are callable while the cpus are paused.
409 407 */
410 408 void
411 409 promsafe_pause_cpus(void)
412 410 {
413 411 pause_cpus(NULL);
414 412
415 413 /* If some other cpu is entering or is in the prom, spin */
416 414 while (prom_cpu || mutex_owner(&prom_mutex)) {
417 415
418 416 start_cpus();
419 417 mutex_enter(&prom_mutex);
420 418
421 419 /* Wait for other cpu to exit prom */
422 420 while (prom_cpu)
423 421 cv_wait(&prom_cv, &prom_mutex);
424 422
425 423 mutex_exit(&prom_mutex);
426 424 pause_cpus(NULL);
427 425 }
428 426
429 427 /* At this point all cpus are paused and none are in the prom */
430 428 }
431 429
432 430 /*
433 431 * This routine is a special form of xc_attention(). It ensures that
434 432 * prom functions are callable while the cpus are at attention.
435 433 */
436 434 void
437 435 promsafe_xc_attention(cpuset_t cpuset)
438 436 {
439 437 xc_attention(cpuset);
440 438
441 439 /* If some other cpu is entering or is in the prom, spin */
442 440 while (prom_cpu || mutex_owner(&prom_mutex)) {
443 441
444 442 xc_dismissed(cpuset);
445 443 mutex_enter(&prom_mutex);
446 444
447 445 /* Wait for other cpu to exit prom */
448 446 while (prom_cpu)
449 447 cv_wait(&prom_cv, &prom_mutex);
450 448
451 449 mutex_exit(&prom_mutex);
452 450 xc_attention(cpuset);
453 451 }
454 452
 455  453  	/* At this point all cpus are at attention and none are in the prom */
456 454 }
457 455
458 456
459 457 #if defined(PROM_32BIT_ADDRS)
460 458
461 459 #include <sys/promimpl.h>
462 460 #include <vm/seg_kmem.h>
463 461 #include <sys/kmem.h>
464 462 #include <sys/bootconf.h>
465 463
466 464 /*
 467  465  * These routines are only used to work around "poor feature interaction"
468 466 * in OBP. See bug 4115680 for details.
469 467 *
470 468 * Many of the promif routines need to allocate temporary buffers
471 469 * with 32-bit addresses to pass in/out of the CIF. The lifetime
472 470 * of the buffers is extremely short, they are allocated and freed
473 471 * around the CIF call. We use vmem_alloc() to cache 32-bit memory.
474 472 *
475 473 * Note the code in promplat_free() to prevent exhausting the 32 bit
476 474 * heap during boot.
477 475 */
478 476 static void *promplat_last_free = NULL;
479 477 static size_t promplat_last_size;
480 478 static vmem_t *promplat_arena;
481 479 static kmutex_t promplat_lock; /* protect arena, last_free, and last_size */
482 480
483 481 void *
484 482 promplat_alloc(size_t size)
485 483 {
486 484
487 485 mutex_enter(&promplat_lock);
488 486 if (promplat_arena == NULL) {
489 487 promplat_arena = vmem_create("promplat", NULL, 0, 8,
490 488 segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
491 489 }
492 490 mutex_exit(&promplat_lock);
493 491
494 492 return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
495 493 }
496 494
497 495 /*
498 496 * Delaying the free() of small allocations gets more mileage
499 497 * from pages during boot, otherwise a cycle of allocate/free
500 498 * calls could burn through available heap32 space too quickly.
501 499 */
502 500 void
503 501 promplat_free(void *p, size_t size)
504 502 {
505 503 void *p2 = NULL;
506 504 size_t s2;
507 505
508 506 /*
509 507 * If VM is initialized, clean up any delayed free().
510 508 */
511 509 if (kvseg.s_base != 0 && promplat_last_free != NULL) {
512 510 mutex_enter(&promplat_lock);
513 511 p2 = promplat_last_free;
514 512 s2 = promplat_last_size;
515 513 promplat_last_free = NULL;
516 514 promplat_last_size = 0;
517 515 mutex_exit(&promplat_lock);
518 516 if (p2 != NULL) {
519 517 vmem_free(promplat_arena, p2, s2);
520 518 p2 = NULL;
521 519 }
522 520 }
523 521
524 522 /*
525 523 * Do the free if VM is initialized or it's a large allocation.
526 524 */
527 525 if (kvseg.s_base != 0 || size >= PAGESIZE) {
528 526 vmem_free(promplat_arena, p, size);
529 527 return;
530 528 }
531 529
532 530 /*
533 531 * Otherwise, do the last free request and delay this one.
534 532 */
535 533 mutex_enter(&promplat_lock);
536 534 if (promplat_last_free != NULL) {
537 535 p2 = promplat_last_free;
538 536 s2 = promplat_last_size;
539 537 }
540 538 promplat_last_free = p;
541 539 promplat_last_size = size;
542 540 mutex_exit(&promplat_lock);
543 541
544 542 if (p2 != NULL)
545 543 vmem_free(promplat_arena, p2, s2);
546 544 }
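The delayed-free idiom above generalizes beyond vmem. A userland sketch of the same shape, assuming malloc/free, with the locking elided for brevity (the kernel version serializes with promplat_lock and frees (ptr, size) pairs through vmem_free()):

	#include <stdlib.h>

	/*
	 * Small frees are parked one-deep so their backing pages stay
	 * usable across a tight allocate/free cycle; large frees are
	 * released immediately.
	 */
	static void *parked;

	static void
	deferred_free(void *p, size_t size, size_t threshold)
	{
		void *prev;

		if (size >= threshold) {
			free(p);		/* large: release immediately */
			return;
		}
		prev = parked;			/* park this one, ... */
		parked = p;
		if (prev != NULL)
			free(prev);		/* ... releasing the previous one */
	}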
547 545
548 546 void
549 547 promplat_bcopy(const void *src, void *dst, size_t count)
550 548 {
551 549 bcopy(src, dst, count);
552 550 }
553 551
554 552 #endif /* PROM_32BIT_ADDRS */
555 553
556 554 static prom_generation_cookie_t prom_tree_gen;
557 555 static krwlock_t prom_tree_lock;
558 556
559 557 int
560 558 prom_tree_access(int (*callback)(void *arg, int has_changed), void *arg,
561 559 prom_generation_cookie_t *ckp)
562 560 {
563 561 int chg, rv;
564 562
565 563 rw_enter(&prom_tree_lock, RW_READER);
566 564 /*
567 565 * If the tree has changed since the caller last accessed it
568 566 * pass 1 as the second argument to the callback function,
569 567 * otherwise 0.
570 568 */
571 569 if (ckp != NULL && *ckp != prom_tree_gen) {
572 570 *ckp = prom_tree_gen;
573 571 chg = 1;
574 572 } else
575 573 chg = 0;
576 574 rv = callback(arg, chg);
577 575 rw_exit(&prom_tree_lock);
578 576 return (rv);
579 577 }
580 578
581 579 int
582 580 prom_tree_update(int (*callback)(void *arg), void *arg)
583 581 {
584 582 int rv;
585 583
586 584 rw_enter(&prom_tree_lock, RW_WRITER);
587 585 prom_tree_gen++;
588 586 rv = callback(arg);
589 587 rw_exit(&prom_tree_lock);
590 588 return (rv);
591 589 }
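A hypothetical caller of prom_tree_access() keeps the generation cookie across calls and uses the has_changed flag to know when a cached view of the device tree must be rebuilt (walk_cached() and the surrounding names are illustrative, not part of this file):

	static prom_generation_cookie_t my_gen;

	static int
	walk_cached(void *arg, int has_changed)
	{
		if (has_changed) {
			/* invalidate and rebuild any cached tree state */
		}
		/* ... read the tree under the reader lock ... */
		return (0);
	}

	static int
	example_read_tree(void *arg)
	{
		return (prom_tree_access(walk_cached, arg, &my_gen));
	}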