641 hval = 0;
642 while (*p) {
643 hval = (hval << 4) + *p++;
644 if ((g = (hval & 0xf0000000)) != 0)
645 hval ^= g >> 24;
646 hval &= ~g;
647 }
648 return (hval);
649 }
650
/*
 * Record a coverage/trace event keyed by the static string "why": find
 * the slot for "why" in the global cyc_coverage[] table -- claiming a
 * fresh slot the first time a given key is seen -- and bump that slot's
 * counter for the given interrupt "level".  The table is an
 * open-addressed hash table probed linearly; an empty slot is claimed
 * with a lockless compare-and-swap on its cyv_why pointer.
 *
 * NOTE(review): arg0/arg1 are unused in the lines visible in this
 * excerpt; the function continues past the last line shown here
 * (presumably recording them in the slot) -- confirm against the full
 * source.
 */
651 static void
652 cyclic_coverage(char *why, int level, uint64_t arg0, uint64_t arg1)
653 {
654 uint_t ndx, orig;
655
/*
 * Start probing at the hash of "why"; "orig" remembers the starting
 * slot so a completely full table can be detected.
 */
656 for (ndx = orig = cyclic_coverage_hash(why) % CY_NCOVERAGE; ; ) {
/* Keys are matched by pointer identity, not by string contents. */
657 if (cyc_coverage[ndx].cyv_why == why)
658 break;
659
/*
 * Either the slot is already owned by a different key, or it was
 * empty and our casptr() lost the race to claim it (casptr()
 * returns the old value, so non-NULL means another caller swapped
 * its key in first).  Advance to the next slot, wrapping at the end
 * of the table, and panic if we probe all the way back to "orig".
 */
660 if (cyc_coverage[ndx].cyv_why != NULL ||
661 casptr(&cyc_coverage[ndx].cyv_why, NULL, why) != NULL) {
662
663 if (++ndx == CY_NCOVERAGE)
664 ndx = 0;
665
666 if (ndx == orig)
667 panic("too many cyclic coverage points");
668 continue;
669 }
670
671 /*
672 * If we're here, we have successfully swung our guy into
673 * the position at "ndx".
674 */
675 break;
676 }
677
/*
 * CY_PASSIVE_LEVEL events are tallied separately from the per-level
 * counts; presumably CY_PASSIVE_LEVEL is not a valid cyv_count[]
 * index -- confirm.
 */
678 if (level == CY_PASSIVE_LEVEL)
679 cyc_coverage[ndx].cyv_passive_count++;
680 else
681 cyc_coverage[ndx].cyv_count[level]++;
1202 break;
1203 }
1204
1205 /*
1206 * We must have had a resize interrupt us.
1207 */
1208 CYC_TRACE(cpu, level, "resize-int", cyclics, 0);
1209 ASSERT(cpu->cyp_state == CYS_EXPANDING);
1210 ASSERT(cyclics != cpu->cyp_cyclics);
1211 ASSERT(resized == 0);
1212 ASSERT(intr_resized == 0);
1213 intr_resized = 1;
1214 cyclics = cpu->cyp_cyclics;
1215 cyclic = &cyclics[buf[consmasked]];
1216 ASSERT(cyclic->cy_handler == handler);
1217 ASSERT(cyclic->cy_arg == arg);
1218 goto reread;
1219 }
1220
1221 if ((opend =
1222 cas32(&cyclic->cy_pend, pend, npend)) != pend) {
1223 /*
1224 * Our cas32 can fail for one of several
1225 * reasons:
1226 *
1227 * (a) An intervening high level bumped up the
1228 * pend count on this cyclic. In this
1229 * case, we will see a higher pend.
1230 *
1231 * (b) The cyclics array has been yanked out
1232 * from underneath us by a resize
1233 * operation. In this case, pend is 0 and
1234 * cyp_state is CYS_EXPANDING.
1235 *
1236 * (c) The cyclic has been removed by an
1237 * intervening remove-xcall. In this case,
1238 * pend will be 0, the cyp_state will be
1239 * CYS_REMOVING, and the cyclic will be
1240 * marked CYF_FREE.
1241 *
1242 * The assertion below checks that we are
1243 * in one of the above situations. The
1244 * action under all three is to return to
1294
1295 ASSERT(resized == 0);
1296 resized = 1;
1297 goto top;
1298 }
1299
1300 /*
1301 * If we were interrupted by a resize operation, then we must have
1302 * seen the hard index change.
1303 */
1304 ASSERT(!(intr_resized == 1 && resized == 0));
1305
1306 if (resized) {
1307 uint32_t lev, nlev;
1308
1309 ASSERT(cpu->cyp_state == CYS_EXPANDING);
1310
1311 do {
1312 lev = cpu->cyp_modify_levels;
1313 nlev = lev + 1;
1314 } while (cas32(&cpu->cyp_modify_levels, lev, nlev) != lev);
1315
1316 /*
1317 * If we are the last soft level to see the modification,
1318 * post on cyp_modify_wait. Otherwise, (if we're not
1319 * already at low level), post down to the next soft level.
1320 */
1321 if (nlev == CY_SOFT_LEVELS) {
1322 CYC_TRACE0(cpu, level, "resize-kick");
1323 sema_v(&cpu->cyp_modify_wait);
1324 } else {
1325 ASSERT(nlev < CY_SOFT_LEVELS);
1326 if (level != CY_LOW_LEVEL) {
1327 cyc_backend_t *be = cpu->cyp_backend;
1328
1329 CYC_TRACE0(cpu, level, "resize-post");
1330 be->cyb_softint(be->cyb_arg, level - 1);
1331 }
1332 }
1333 }
1334 }
|
641 hval = 0;
642 while (*p) {
643 hval = (hval << 4) + *p++;
644 if ((g = (hval & 0xf0000000)) != 0)
645 hval ^= g >> 24;
646 hval &= ~g;
647 }
648 return (hval);
649 }
650
/*
 * Record a coverage/trace event keyed by the static string "why": find
 * the slot for "why" in the global cyc_coverage[] table -- claiming a
 * fresh slot the first time a given key is seen -- and bump that slot's
 * counter for the given interrupt "level".  The table is an
 * open-addressed hash table probed linearly; an empty slot is claimed
 * with a lockless compare-and-swap on its cyv_why pointer.
 *
 * NOTE(review): arg0/arg1 are unused in the lines visible in this
 * excerpt; the function continues past the last line shown here
 * (presumably recording them in the slot) -- confirm against the full
 * source.
 */
651 static void
652 cyclic_coverage(char *why, int level, uint64_t arg0, uint64_t arg1)
653 {
654 uint_t ndx, orig;
655
/*
 * Start probing at the hash of "why"; "orig" remembers the starting
 * slot so a completely full table can be detected.
 */
656 for (ndx = orig = cyclic_coverage_hash(why) % CY_NCOVERAGE; ; ) {
/* Keys are matched by pointer identity, not by string contents. */
657 if (cyc_coverage[ndx].cyv_why == why)
658 break;
659
/*
 * Either the slot is already owned by a different key, or it was
 * empty and our atomic_cas_ptr() lost the race to claim it
 * (atomic_cas_ptr() returns the old value, so non-NULL means another
 * caller swapped its key in first).  Advance to the next slot,
 * wrapping at the end of the table, and panic if we probe all the
 * way back to "orig".
 */
660 if (cyc_coverage[ndx].cyv_why != NULL ||
661 atomic_cas_ptr(&cyc_coverage[ndx].cyv_why, NULL, why) !=
662 NULL) {
663
664 if (++ndx == CY_NCOVERAGE)
665 ndx = 0;
666
667 if (ndx == orig)
668 panic("too many cyclic coverage points");
669 continue;
670 }
671
672 /*
673 * If we're here, we have successfully swung our guy into
674 * the position at "ndx".
675 */
676 break;
677 }
678
/*
 * CY_PASSIVE_LEVEL events are tallied separately from the per-level
 * counts; presumably CY_PASSIVE_LEVEL is not a valid cyv_count[]
 * index -- confirm.
 */
679 if (level == CY_PASSIVE_LEVEL)
680 cyc_coverage[ndx].cyv_passive_count++;
681 else
682 cyc_coverage[ndx].cyv_count[level]++;
1203 break;
1204 }
1205
1206 /*
1207 * We must have had a resize interrupt us.
1208 */
1209 CYC_TRACE(cpu, level, "resize-int", cyclics, 0);
1210 ASSERT(cpu->cyp_state == CYS_EXPANDING);
1211 ASSERT(cyclics != cpu->cyp_cyclics);
1212 ASSERT(resized == 0);
1213 ASSERT(intr_resized == 0);
1214 intr_resized = 1;
1215 cyclics = cpu->cyp_cyclics;
1216 cyclic = &cyclics[buf[consmasked]];
1217 ASSERT(cyclic->cy_handler == handler);
1218 ASSERT(cyclic->cy_arg == arg);
1219 goto reread;
1220 }
1221
1222 if ((opend =
1223 atomic_cas_32(&cyclic->cy_pend, pend, npend)) !=
1224 pend) {
1225 /*
1226 * Our atomic_cas_32 can fail for one of several
1227 * reasons:
1228 *
1229 * (a) An intervening high level bumped up the
1230 * pend count on this cyclic. In this
1231 * case, we will see a higher pend.
1232 *
1233 * (b) The cyclics array has been yanked out
1234 * from underneath us by a resize
1235 * operation. In this case, pend is 0 and
1236 * cyp_state is CYS_EXPANDING.
1237 *
1238 * (c) The cyclic has been removed by an
1239 * intervening remove-xcall. In this case,
1240 * pend will be 0, the cyp_state will be
1241 * CYS_REMOVING, and the cyclic will be
1242 * marked CYF_FREE.
1243 *
1244 * The assertion below checks that we are
1245 * in one of the above situations. The
1246 * action under all three is to return to
1296
1297 ASSERT(resized == 0);
1298 resized = 1;
1299 goto top;
1300 }
1301
1302 /*
1303 * If we were interrupted by a resize operation, then we must have
1304 * seen the hard index change.
1305 */
1306 ASSERT(!(intr_resized == 1 && resized == 0));
1307
1308 if (resized) {
1309 uint32_t lev, nlev;
1310
1311 ASSERT(cpu->cyp_state == CYS_EXPANDING);
1312
1313 do {
1314 lev = cpu->cyp_modify_levels;
1315 nlev = lev + 1;
1316 } while (atomic_cas_32(&cpu->cyp_modify_levels, lev, nlev) !=
1317 lev);
1318
1319 /*
1320 * If we are the last soft level to see the modification,
1321 * post on cyp_modify_wait. Otherwise, (if we're not
1322 * already at low level), post down to the next soft level.
1323 */
1324 if (nlev == CY_SOFT_LEVELS) {
1325 CYC_TRACE0(cpu, level, "resize-kick");
1326 sema_v(&cpu->cyp_modify_wait);
1327 } else {
1328 ASSERT(nlev < CY_SOFT_LEVELS);
1329 if (level != CY_LOW_LEVEL) {
1330 cyc_backend_t *be = cpu->cyp_backend;
1331
1332 CYC_TRACE0(cpu, level, "resize-post");
1333 be->cyb_softint(be->cyb_arg, level - 1);
1334 }
1335 }
1336 }
1337 }
|