Print this page
4781 sd shouldn't abuse ddi_get_time(9f)
Reviewed by: Richard Elling <richard.elling@gmail.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/scsi/targets/sd.c
+++ new/usr/src/uts/common/io/scsi/targets/sd.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 /*
26 26 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
27 27 * Copyright (c) 2012 by Delphix. All rights reserved.
28 - * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
28 + * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
29 29 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
30 30 */
31 31 /*
32 32 * Copyright 2011 cyril.galibern@opensvc.com
33 33 */
34 34
35 35 /*
36 36 * SCSI disk target driver.
37 37 */
38 38 #include <sys/scsi/scsi.h>
39 39 #include <sys/dkbad.h>
40 40 #include <sys/dklabel.h>
41 41 #include <sys/dkio.h>
42 42 #include <sys/fdio.h>
43 43 #include <sys/cdio.h>
44 44 #include <sys/mhd.h>
45 45 #include <sys/vtoc.h>
46 46 #include <sys/dktp/fdisk.h>
47 47 #include <sys/kstat.h>
48 48 #include <sys/vtrace.h>
49 49 #include <sys/note.h>
50 50 #include <sys/thread.h>
51 51 #include <sys/proc.h>
52 52 #include <sys/efi_partition.h>
53 53 #include <sys/var.h>
54 54 #include <sys/aio_req.h>
55 55
56 56 #ifdef __lock_lint
57 57 #define _LP64
58 58 #define __amd64
59 59 #endif
60 60
61 61 #if (defined(__fibre))
62 62 /* Note: is there a leadville version of the following? */
63 63 #include <sys/fc4/fcal_linkapp.h>
64 64 #endif
65 65 #include <sys/taskq.h>
66 66 #include <sys/uuid.h>
67 67 #include <sys/byteorder.h>
68 68 #include <sys/sdt.h>
69 69
70 70 #include "sd_xbuf.h"
71 71
72 72 #include <sys/scsi/targets/sddef.h>
73 73 #include <sys/cmlb.h>
74 74 #include <sys/sysevent/eventdefs.h>
75 75 #include <sys/sysevent/dev.h>
76 76
77 77 #include <sys/fm/protocol.h>
78 78
79 79 /*
80 80 * Loadable module info.
81 81 */
82 82 #if (defined(__fibre))
83 83 #define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
84 84 #else /* !__fibre */
85 85 #define SD_MODULE_NAME "SCSI Disk Driver"
86 86 #endif /* !__fibre */
87 87
88 88 /*
89 89 * Define the interconnect type, to allow the driver to distinguish
90 90 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
91 91 *
92 92 * This is really for backward compatibility. In the future, the driver
93 93 * should actually check the "interconnect-type" property as reported by
94 94 * the HBA; however at present this property is not defined by all HBAs,
95 95 * so we will use this #define (1) to permit the driver to run in
96 96 * backward-compatibility mode; and (2) to print a notification message
97 97 * if an FC HBA does not support the "interconnect-type" property. The
98 98 * behavior of the driver will be to assume parallel SCSI behaviors unless
99 99 * the "interconnect-type" property is defined by the HBA **AND** has a
100 100 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
101 101 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
102 102 * Channel behaviors (as per the old ssd). (Note that the
103 103 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
104 104 * will result in the driver assuming parallel SCSI behaviors.)
105 105 *
106 106 * (see common/sys/scsi/impl/services.h)
107 107 *
108 108 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
109 109 * since some FC HBAs may already support that, and there is some code in
110 110 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
111 111 * default would confuse that code, and besides things should work fine
112 112 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
113 113 * "interconnect_type" property.
114 114 *
115 115 */
116 116 #if (defined(__fibre))
117 117 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE
118 118 #else
119 119 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
120 120 #endif
121 121
122 122 /*
123 123 * The name of the driver, established from the module name in _init.
124 124 */
125 125 static char *sd_label = NULL;
126 126
127 127 /*
128 128 * Driver name is unfortunately prefixed on some driver.conf properties.
129 129 */
130 130 #if (defined(__fibre))
131 131 #define sd_max_xfer_size ssd_max_xfer_size
132 132 #define sd_config_list ssd_config_list
133 133 static char *sd_max_xfer_size = "ssd_max_xfer_size";
134 134 static char *sd_config_list = "ssd-config-list";
135 135 #else
136 136 static char *sd_max_xfer_size = "sd_max_xfer_size";
137 137 static char *sd_config_list = "sd-config-list";
138 138 #endif
139 139
140 140 /*
141 141 * Driver global variables
142 142 */
143 143
144 144 #if (defined(__fibre))
145 145 /*
146 146 * These #defines are to avoid namespace collisions that occur because this
147 147 * code is currently used to compile two separate driver modules: sd and ssd.
148 148 * All global variables need to be treated this way (even if declared static)
149 149 * in order to allow the debugger to resolve the names properly.
150 150 * It is anticipated that in the near future the ssd module will be obsoleted,
151 151 * at which time this namespace issue should go away.
152 152 */
153 153 #define sd_state ssd_state
154 154 #define sd_io_time ssd_io_time
155 155 #define sd_failfast_enable ssd_failfast_enable
156 156 #define sd_ua_retry_count ssd_ua_retry_count
157 157 #define sd_report_pfa ssd_report_pfa
158 158 #define sd_max_throttle ssd_max_throttle
159 159 #define sd_min_throttle ssd_min_throttle
160 160 #define sd_rot_delay ssd_rot_delay
161 161
162 162 #define sd_retry_on_reservation_conflict \
163 163 ssd_retry_on_reservation_conflict
164 164 #define sd_reinstate_resv_delay ssd_reinstate_resv_delay
165 165 #define sd_resv_conflict_name ssd_resv_conflict_name
166 166
167 167 #define sd_component_mask ssd_component_mask
168 168 #define sd_level_mask ssd_level_mask
169 169 #define sd_debug_un ssd_debug_un
170 170 #define sd_error_level ssd_error_level
171 171
172 172 #define sd_xbuf_active_limit ssd_xbuf_active_limit
173 173 #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit
174 174
175 175 #define sd_tr ssd_tr
176 176 #define sd_reset_throttle_timeout ssd_reset_throttle_timeout
177 177 #define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout
178 178 #define sd_qfull_throttle_enable ssd_qfull_throttle_enable
179 179 #define sd_check_media_time ssd_check_media_time
180 180 #define sd_wait_cmds_complete ssd_wait_cmds_complete
181 181 #define sd_label_mutex ssd_label_mutex
182 182 #define sd_detach_mutex ssd_detach_mutex
183 183 #define sd_log_buf ssd_log_buf
184 184 #define sd_log_mutex ssd_log_mutex
185 185
186 186 #define sd_disk_table ssd_disk_table
187 187 #define sd_disk_table_size ssd_disk_table_size
188 188 #define sd_sense_mutex ssd_sense_mutex
189 189 #define sd_cdbtab ssd_cdbtab
190 190
191 191 #define sd_cb_ops ssd_cb_ops
192 192 #define sd_ops ssd_ops
193 193 #define sd_additional_codes ssd_additional_codes
194 194 #define sd_tgops ssd_tgops
195 195
196 196 #define sd_minor_data ssd_minor_data
197 197 #define sd_minor_data_efi ssd_minor_data_efi
198 198
199 199 #define sd_tq ssd_tq
200 200 #define sd_wmr_tq ssd_wmr_tq
201 201 #define sd_taskq_name ssd_taskq_name
202 202 #define sd_wmr_taskq_name ssd_wmr_taskq_name
203 203 #define sd_taskq_minalloc ssd_taskq_minalloc
204 204 #define sd_taskq_maxalloc ssd_taskq_maxalloc
205 205
206 206 #define sd_dump_format_string ssd_dump_format_string
207 207
208 208 #define sd_iostart_chain ssd_iostart_chain
209 209 #define sd_iodone_chain ssd_iodone_chain
210 210
211 211 #define sd_pm_idletime ssd_pm_idletime
212 212
213 213 #define sd_force_pm_supported ssd_force_pm_supported
214 214
215 215 #define sd_dtype_optical_bind ssd_dtype_optical_bind
216 216
217 217 #define sd_ssc_init ssd_ssc_init
218 218 #define sd_ssc_send ssd_ssc_send
219 219 #define sd_ssc_fini ssd_ssc_fini
220 220 #define sd_ssc_assessment ssd_ssc_assessment
221 221 #define sd_ssc_post ssd_ssc_post
222 222 #define sd_ssc_print ssd_ssc_print
223 223 #define sd_ssc_ereport_post ssd_ssc_ereport_post
224 224 #define sd_ssc_set_info ssd_ssc_set_info
225 225 #define sd_ssc_extract_info ssd_ssc_extract_info
226 226
227 227 #endif
228 228
229 229 #ifdef SDDEBUG
230 230 int sd_force_pm_supported = 0;
231 231 #endif /* SDDEBUG */
232 232
233 233 void *sd_state = NULL;
234 234 int sd_io_time = SD_IO_TIME;
235 235 int sd_failfast_enable = 1;
236 236 int sd_ua_retry_count = SD_UA_RETRY_COUNT;
237 237 int sd_report_pfa = 1;
238 238 int sd_max_throttle = SD_MAX_THROTTLE;
239 239 int sd_min_throttle = SD_MIN_THROTTLE;
240 240 int sd_rot_delay = 4; /* Default 4ms Rotation delay */
241 241 int sd_qfull_throttle_enable = TRUE;
242 242
243 243 int sd_retry_on_reservation_conflict = 1;
244 244 int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
245 245 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
246 246
247 247 static int sd_dtype_optical_bind = -1;
248 248
249 249 /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
250 250 static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";
251 251
252 252 /*
253 253 * Global data for debug logging. To enable debug printing, sd_component_mask
254 254 * and sd_level_mask should be set to the desired bit patterns as outlined in
255 255 * sddef.h.
256 256 */
257 257 uint_t sd_component_mask = 0x0;
258 258 uint_t sd_level_mask = 0x0;
259 259 struct sd_lun *sd_debug_un = NULL;
260 260 uint_t sd_error_level = SCSI_ERR_RETRYABLE;
261 261
262 262 /* Note: these may go away in the future... */
263 263 static uint32_t sd_xbuf_active_limit = 512;
264 264 static uint32_t sd_xbuf_reserve_limit = 16;
265 265
266 266 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };
267 267
268 268 /*
269 269 * Timer value used to reset the throttle after it has been reduced
270 270 * (typically in response to TRAN_BUSY or STATUS_QFULL)
271 271 */
272 272 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
273 273 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;
274 274
275 275 /*
276 276 * Interval value associated with the media change scsi watch.
277 277 */
278 278 static int sd_check_media_time = 3000000;
279 279
280 280 /*
281 281 * Wait value used for in progress operations during a DDI_SUSPEND
282 282 */
283 283 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;
284 284
285 285 /*
286 286 * sd_label_mutex protects a static buffer used in the disk label
287 287 * component of the driver
288 288 */
289 289 static kmutex_t sd_label_mutex;
290 290
291 291 /*
292 292 * sd_detach_mutex protects un_layer_count, un_detach_count, and
293 293 * un_opens_in_progress in the sd_lun structure.
294 294 */
295 295 static kmutex_t sd_detach_mutex;
296 296
297 297 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
298 298 sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))
299 299
300 300 /*
301 301 * Global buffer and mutex for debug logging
302 302 */
303 303 static char sd_log_buf[1024];
304 304 static kmutex_t sd_log_mutex;
305 305
306 306 /*
307 307 * Structs and globals for recording attached lun information.
308 308 * This maintains a chain. Each node in the chain represents a SCSI controller.
309 309 * The structure records the number of luns attached to each target connected
310 310 * with the controller.
311 311 * For parallel scsi device only.
312 312 */
313 313 struct sd_scsi_hba_tgt_lun {
314 314 struct sd_scsi_hba_tgt_lun *next;	/* next controller node in the chain */
315 315 dev_info_t *pdip;	/* devinfo of the SCSI controller this node tracks */
316 316 int nlun[NTARGETS_WIDE];	/* count of attached luns, indexed by target */
317 317 };
318 318
319 319 /*
320 320 * Flag to indicate the lun is attached or detached
321 321 */
322 322 #define SD_SCSI_LUN_ATTACH 0
323 323 #define SD_SCSI_LUN_DETACH 1
324 324
325 325 static kmutex_t sd_scsi_target_lun_mutex;
326 326 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;
327 327
328 328 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
329 329 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))
330 330
331 331 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
332 332 sd_scsi_target_lun_head))
333 333
334 334 /*
335 335 * "Smart" Probe Caching structs, globals, #defines, etc.
336 336 * For parallel scsi and non-self-identify device only.
337 337 */
338 338
339 339 /*
340 340 * The following resources and routines are implemented to support
341 341 * "smart" probing, which caches the scsi_probe() results in an array,
342 342 * in order to help avoid long probe times.
343 343 */
344 344 struct sd_scsi_probe_cache {
345 345 struct sd_scsi_probe_cache *next;	/* next cache node in the list */
346 346 dev_info_t *pdip;	/* devinfo the cached results belong to */
347 347 int cache[NTARGETS_WIDE];	/* cached scsi_probe() result per target */
348 348 };
349 349
350 350 static kmutex_t sd_scsi_probe_cache_mutex;
351 351 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
352 352
353 353 /*
354 354 * Really we only need protection on the head of the linked list, but
355 355 * better safe than sorry.
356 356 */
357 357 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
358 358 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))
359 359
360 360 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
361 361 sd_scsi_probe_cache_head))
362 362
363 363 /*
364 364 * Power attribute table
365 365 */
366 366 static sd_power_attr_ss sd_pwr_ss = {
367 367 { "NAME=spindle-motor", "0=off", "1=on", NULL },	/* pm-components strings */
368 368 {0, 100},	/* NOTE(review): per-level values; exact field meanings are */
369 369 {30, 0},	/* defined by sd_power_attr_ss in sddef.h (not visible here) */
370 370 {20000, 0}	/* -- confirm against that definition before relying on them */
371 371 };
372 372
373 373 static sd_power_attr_pc sd_pwr_pc = {
374 374 { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
375 375 "3=active", NULL },	/* pm-components strings: four power-condition levels */
376 376 {0, 0, 0, 100},	/* NOTE(review): per-level values; field meanings are */
377 377 {90, 90, 20, 0},	/* defined by sd_power_attr_pc in sddef.h (not visible */
378 378 {15000, 15000, 1000, 0}	/* here) -- confirm there */
379 379 };
380 380
381 381 /*
382 382 * Power level to power condition
383 383 */
384 384 static int sd_pl2pc[] = {
385 385 SD_TARGET_START_VALID,	/* power level 0 */
386 386 SD_TARGET_STANDBY,	/* power level 1 */
387 387 SD_TARGET_IDLE,	/* power level 2 */
388 388 SD_TARGET_ACTIVE	/* power level 3 */
389 389 };
390 390
391 391 /*
392 392 * Vendor specific data name property declarations
393 393 */
394 394
395 395 #if defined(__fibre) || defined(__i386) ||defined(__amd64)
396 396
397 397 static sd_tunables seagate_properties = {
398 398 SEAGATE_THROTTLE_VALUE,
399 399 0,
400 400 0,
401 401 0,
402 402 0,
403 403 0,
404 404 0,
405 405 0,
406 406 0
407 407 };
408 408
409 409
410 410 static sd_tunables fujitsu_properties = {
411 411 FUJITSU_THROTTLE_VALUE,
412 412 0,
413 413 0,
414 414 0,
415 415 0,
416 416 0,
417 417 0,
418 418 0,
419 419 0
420 420 };
421 421
422 422 static sd_tunables ibm_properties = {
423 423 IBM_THROTTLE_VALUE,
424 424 0,
425 425 0,
426 426 0,
427 427 0,
428 428 0,
429 429 0,
430 430 0,
431 431 0
432 432 };
433 433
434 434 static sd_tunables purple_properties = {
435 435 PURPLE_THROTTLE_VALUE,
436 436 0,
437 437 0,
438 438 PURPLE_BUSY_RETRIES,
439 439 PURPLE_RESET_RETRY_COUNT,
440 440 PURPLE_RESERVE_RELEASE_TIME,
441 441 0,
442 442 0,
443 443 0
444 444 };
445 445
446 446 static sd_tunables sve_properties = {
447 447 SVE_THROTTLE_VALUE,
448 448 0,
449 449 0,
450 450 SVE_BUSY_RETRIES,
451 451 SVE_RESET_RETRY_COUNT,
452 452 SVE_RESERVE_RELEASE_TIME,
453 453 SVE_MIN_THROTTLE_VALUE,
454 454 SVE_DISKSORT_DISABLED_FLAG,
455 455 0
456 456 };
457 457
458 458 static sd_tunables maserati_properties = {
459 459 0,
460 460 0,
461 461 0,
462 462 0,
463 463 0,
464 464 0,
465 465 0,
466 466 MASERATI_DISKSORT_DISABLED_FLAG,
467 467 MASERATI_LUN_RESET_ENABLED_FLAG
468 468 };
469 469
470 470 static sd_tunables pirus_properties = {
471 471 PIRUS_THROTTLE_VALUE,
472 472 0,
473 473 PIRUS_NRR_COUNT,
474 474 PIRUS_BUSY_RETRIES,
475 475 PIRUS_RESET_RETRY_COUNT,
476 476 0,
477 477 PIRUS_MIN_THROTTLE_VALUE,
478 478 PIRUS_DISKSORT_DISABLED_FLAG,
479 479 PIRUS_LUN_RESET_ENABLED_FLAG
480 480 };
481 481
482 482 #endif
483 483
484 484 #if (defined(__sparc) && !defined(__fibre)) || \
485 485 (defined(__i386) || defined(__amd64))
486 486
487 487
488 488 static sd_tunables elite_properties = {
489 489 ELITE_THROTTLE_VALUE,
490 490 0,
491 491 0,
492 492 0,
493 493 0,
494 494 0,
495 495 0,
496 496 0,
497 497 0
498 498 };
499 499
500 500 static sd_tunables st31200n_properties = {
501 501 ST31200N_THROTTLE_VALUE,
502 502 0,
503 503 0,
504 504 0,
505 505 0,
506 506 0,
507 507 0,
508 508 0,
509 509 0
510 510 };
511 511
512 512 #endif /* Fibre or not */
513 513
514 514 static sd_tunables lsi_properties_scsi = {
515 515 LSI_THROTTLE_VALUE,
516 516 0,
517 517 LSI_NOTREADY_RETRIES,
518 518 0,
519 519 0,
520 520 0,
521 521 0,
522 522 0,
523 523 0
524 524 };
525 525
526 526 static sd_tunables symbios_properties = {
527 527 SYMBIOS_THROTTLE_VALUE,
528 528 0,
529 529 SYMBIOS_NOTREADY_RETRIES,
530 530 0,
531 531 0,
532 532 0,
533 533 0,
534 534 0,
535 535 0
536 536 };
537 537
538 538 static sd_tunables lsi_properties = {
539 539 0,
540 540 0,
541 541 LSI_NOTREADY_RETRIES,
542 542 0,
543 543 0,
544 544 0,
545 545 0,
546 546 0,
547 547 0
548 548 };
549 549
550 550 static sd_tunables lsi_oem_properties = {
551 551 0,	/* throttle */
552 552 0,	/* ctype */
553 553 LSI_OEM_NOTREADY_RETRIES,	/* not-ready retry count */
554 554 0,	/* busy retries */
555 555 0,	/* reset retry count */
556 556 0,	/* reserve release time */
557 557 0,	/* min throttle */
558 558 0,	/* disksort disabled flag */
559 559 0,	/* lun reset enabled flag */
560 560 1	/* NOTE(review): only table with a 10th initializer -- confirm member name in sd_tunables (sddef.h) */
561 561 };
562 562
563 563
564 564
565 565 #if (defined(SD_PROP_TST))
566 566
567 567 #define SD_TST_CTYPE_VAL CTYPE_CDROM
568 568 #define SD_TST_THROTTLE_VAL 16
569 569 #define SD_TST_NOTREADY_VAL 12
570 570 #define SD_TST_BUSY_VAL 60
571 571 #define SD_TST_RST_RETRY_VAL 36
572 572 #define SD_TST_RSV_REL_TIME 60
573 573
574 574 static sd_tunables tst_properties = {	/* used only when SD_PROP_TST is defined */
575 575 SD_TST_THROTTLE_VAL,
576 576 SD_TST_CTYPE_VAL,
577 577 SD_TST_NOTREADY_VAL,
578 578 SD_TST_BUSY_VAL,
579 579 SD_TST_RST_RETRY_VAL,
580 580 SD_TST_RSV_REL_TIME,
581 581 0,	/* min throttle */
582 582 0,	/* disksort disabled flag */
583 583 0	/* lun reset enabled flag */
584 584 };
585 585 #endif
586 586
587 587 /* This is similar to the ANSI toupper implementation */
588 588 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
589 589
590 590 /*
591 591 * Static Driver Configuration Table
592 592 *
593 593 * This is the table of disks which need throttle adjustment (or, perhaps
594 594 * something else as defined by the flags at a future time.) device_id
595 595 * is a string consisting of concatenated vid (vendor), pid (product/model)
596 596 * and revision strings as defined in the scsi_inquiry structure. Offsets of
597 597 * the parts of the string are as defined by the sizes in the scsi_inquiry
598 598 * structure. Device type is searched as far as the device_id string is
599 599 * defined. Flags defines which values are to be set in the driver from the
600 600 * properties list.
601 601 *
602 602 * Entries below which begin and end with a "*" are a special case.
603 603 * These do not have a specific vendor, and the string which follows
604 604 * can appear anywhere in the 16 byte PID portion of the inquiry data.
605 605 *
606 606 * Entries below which begin and end with a " " (blank) are a special
607 607 * case. The comparison function will treat multiple consecutive blanks
608 608 * as equivalent to a single blank. For example, this causes a
609 609 * sd_disk_table entry of " NEC CDROM " to match a device's id string
610 610 * of "NEC CDROM".
611 611 *
612 612 * Note: The MD21 controller type has been obsoleted.
613 613 * ST318202F is a Legacy device
614 614 * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
615 615 * made with an FC connection. The entries here are a legacy.
616 616 */
617 617 static sd_disk_config_t sd_disk_table[] = {
618 618 #if defined(__fibre) || defined(__i386) || defined(__amd64)
619 619 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
620 620 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
621 621 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
622 622 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
623 623 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
624 624 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
625 625 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
626 626 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
627 627 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
628 628 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
629 629 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
630 630 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
631 631 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
632 632 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
633 633 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
634 634 { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
635 635 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
636 636 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
637 637 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
638 638 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
639 639 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
640 640 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
641 641 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
642 642 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
643 643 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
644 644 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
645 645 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
646 646 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
647 647 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
648 648 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
649 649 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
650 650 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
651 651 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
652 652 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
653 653 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
654 654 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
655 655 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
656 656 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
657 657 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
658 658 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
659 659 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
660 660 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
661 661 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
662 662 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
663 663 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
664 664 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
665 665 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
666 666 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
667 667 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
668 668 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
669 669 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
670 670 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
671 671 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
672 672 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
673 673 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
674 674 { "SUN T3", SD_CONF_BSET_THROTTLE |
675 675 SD_CONF_BSET_BSY_RETRY_COUNT|
676 676 SD_CONF_BSET_RST_RETRIES|
677 677 SD_CONF_BSET_RSV_REL_TIME,
678 678 &purple_properties },
679 679 { "SUN SESS01", SD_CONF_BSET_THROTTLE |
680 680 SD_CONF_BSET_BSY_RETRY_COUNT|
681 681 SD_CONF_BSET_RST_RETRIES|
682 682 SD_CONF_BSET_RSV_REL_TIME|
683 683 SD_CONF_BSET_MIN_THROTTLE|
684 684 SD_CONF_BSET_DISKSORT_DISABLED,
685 685 &sve_properties },
686 686 { "SUN T4", SD_CONF_BSET_THROTTLE |
687 687 SD_CONF_BSET_BSY_RETRY_COUNT|
688 688 SD_CONF_BSET_RST_RETRIES|
689 689 SD_CONF_BSET_RSV_REL_TIME,
690 690 &purple_properties },
691 691 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
692 692 SD_CONF_BSET_LUN_RESET_ENABLED,
693 693 &maserati_properties },
694 694 { "SUN SE6920", SD_CONF_BSET_THROTTLE |
695 695 SD_CONF_BSET_NRR_COUNT|
696 696 SD_CONF_BSET_BSY_RETRY_COUNT|
697 697 SD_CONF_BSET_RST_RETRIES|
698 698 SD_CONF_BSET_MIN_THROTTLE|
699 699 SD_CONF_BSET_DISKSORT_DISABLED|
700 700 SD_CONF_BSET_LUN_RESET_ENABLED,
701 701 &pirus_properties },
702 702 { "SUN SE6940", SD_CONF_BSET_THROTTLE |
703 703 SD_CONF_BSET_NRR_COUNT|
704 704 SD_CONF_BSET_BSY_RETRY_COUNT|
705 705 SD_CONF_BSET_RST_RETRIES|
706 706 SD_CONF_BSET_MIN_THROTTLE|
707 707 SD_CONF_BSET_DISKSORT_DISABLED|
708 708 SD_CONF_BSET_LUN_RESET_ENABLED,
709 709 &pirus_properties },
710 710 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
711 711 SD_CONF_BSET_NRR_COUNT|
712 712 SD_CONF_BSET_BSY_RETRY_COUNT|
713 713 SD_CONF_BSET_RST_RETRIES|
714 714 SD_CONF_BSET_MIN_THROTTLE|
715 715 SD_CONF_BSET_DISKSORT_DISABLED|
716 716 SD_CONF_BSET_LUN_RESET_ENABLED,
717 717 &pirus_properties },
718 718 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
719 719 SD_CONF_BSET_NRR_COUNT|
720 720 SD_CONF_BSET_BSY_RETRY_COUNT|
721 721 SD_CONF_BSET_RST_RETRIES|
722 722 SD_CONF_BSET_MIN_THROTTLE|
723 723 SD_CONF_BSET_DISKSORT_DISABLED|
724 724 SD_CONF_BSET_LUN_RESET_ENABLED,
725 725 &pirus_properties },
726 726 { "SUN PSX1000", SD_CONF_BSET_THROTTLE |
727 727 SD_CONF_BSET_NRR_COUNT|
728 728 SD_CONF_BSET_BSY_RETRY_COUNT|
729 729 SD_CONF_BSET_RST_RETRIES|
730 730 SD_CONF_BSET_MIN_THROTTLE|
731 731 SD_CONF_BSET_DISKSORT_DISABLED|
732 732 SD_CONF_BSET_LUN_RESET_ENABLED,
733 733 &pirus_properties },
734 734 { "SUN SE6330", SD_CONF_BSET_THROTTLE |
735 735 SD_CONF_BSET_NRR_COUNT|
736 736 SD_CONF_BSET_BSY_RETRY_COUNT|
737 737 SD_CONF_BSET_RST_RETRIES|
738 738 SD_CONF_BSET_MIN_THROTTLE|
739 739 SD_CONF_BSET_DISKSORT_DISABLED|
740 740 SD_CONF_BSET_LUN_RESET_ENABLED,
741 741 &pirus_properties },
742 742 { "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
743 743 { "SUN SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
744 744 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
745 745 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
746 746 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
747 747 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
748 748 { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
749 749 #endif /* fibre or NON-sparc platforms */
750 750 #if ((defined(__sparc) && !defined(__fibre)) ||\
751 751 (defined(__i386) || defined(__amd64)))
752 752 { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
753 753 { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
754 754 { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
755 755 { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
756 756 { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
757 757 { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
758 758 { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
759 759 { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
760 760 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
761 761 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
762 762 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
763 763 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
764 764 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
765 765 &symbios_properties },
766 766 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
767 767 &lsi_properties_scsi },
768 768 #if defined(__i386) || defined(__amd64)
769 769 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
770 770 | SD_CONF_BSET_READSUB_BCD
771 771 | SD_CONF_BSET_READ_TOC_ADDR_BCD
772 772 | SD_CONF_BSET_NO_READ_HEADER
773 773 | SD_CONF_BSET_READ_CD_XD4), NULL },
774 774
775 775 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
776 776 | SD_CONF_BSET_READSUB_BCD
777 777 | SD_CONF_BSET_READ_TOC_ADDR_BCD
778 778 | SD_CONF_BSET_NO_READ_HEADER
779 779 | SD_CONF_BSET_READ_CD_XD4), NULL },
780 780 #endif /* __i386 || __amd64 */
781 781 #endif /* sparc NON-fibre or NON-sparc platforms */
782 782
783 783 #if (defined(SD_PROP_TST))
784 784 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
785 785 | SD_CONF_BSET_CTYPE
786 786 | SD_CONF_BSET_NRR_COUNT
787 787 | SD_CONF_BSET_FAB_DEVID
788 788 | SD_CONF_BSET_NOCACHE
789 789 | SD_CONF_BSET_BSY_RETRY_COUNT
790 790 | SD_CONF_BSET_PLAYMSF_BCD
791 791 | SD_CONF_BSET_READSUB_BCD
792 792 | SD_CONF_BSET_READ_TOC_TRK_BCD
793 793 | SD_CONF_BSET_READ_TOC_ADDR_BCD
794 794 | SD_CONF_BSET_NO_READ_HEADER
795 795 | SD_CONF_BSET_READ_CD_XD4
796 796 | SD_CONF_BSET_RST_RETRIES
797 797 | SD_CONF_BSET_RSV_REL_TIME
798 798 | SD_CONF_BSET_TUR_CHECK), &tst_properties},
799 799 #endif
800 800 };
801 801
802 802 static const int sd_disk_table_size =
803 803 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
804 804
805 805 /*
806 806 * Emulation mode disk drive VID/PID table
807 807 */
808 808 static char sd_flash_dev_table[][25] = {	/* fixed-width entries: up to 24 chars + NUL */
809 809 "ATA MARVELL SD88SA02",
810 810 "MARVELL SD88SA02",
811 811 "TOSHIBA THNSNV05",
812 812 };
813 813
814 814 static const int sd_flash_dev_table_size =
815 815 sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]);
816 816
817 817 #define SD_INTERCONNECT_PARALLEL 0
818 818 #define SD_INTERCONNECT_FABRIC 1
819 819 #define SD_INTERCONNECT_FIBRE 2
820 820 #define SD_INTERCONNECT_SSA 3
821 821 #define SD_INTERCONNECT_SATA 4
822 822 #define SD_INTERCONNECT_SAS 5
823 823
824 824 #define SD_IS_PARALLEL_SCSI(un) \
825 825 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
826 826 #define SD_IS_SERIAL(un) \
827 827 (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
828 828 ((un)->un_interconnect_type == SD_INTERCONNECT_SAS))
829 829
830 830 /*
831 831 * Definitions used by device id registration routines
832 832 */
833 833 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */
834 834 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */
835 835 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */
836 836
837 837 static kmutex_t sd_sense_mutex = {0};
838 838
839 839 /*
840 840 * Macros for updates of the driver state
841 841 */
842 842 #define New_state(un, s) \
843 843 (un)->un_last_state = (un)->un_state, (un)->un_state = (s)
844 844 #define Restore_state(un) \
845 845 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
846 846
847 847 static struct sd_cdbinfo sd_cdbtab[] = {
848 848 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, },	/* NOTE(review): columns look like */
849 849 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },	/* {cdb size, group code, max block */
850 850 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },	/* address, max xfer length} -- confirm */
851 851 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },	/* against sd_cdbinfo in sddef.h */
852 852 };
853 853
854 854 /*
855 855 * Specifies the number of seconds that must have elapsed since the last
856 856 * cmd. has completed for a device to be declared idle to the PM framework.
857 857 */
858 858 static int sd_pm_idletime = 1;
859 859
860 860 /*
861 861 * Internal function prototypes
862 862 */
863 863
864 864 #if (defined(__fibre))
865 865 /*
866 866 * These #defines are to avoid namespace collisions that occur because this
867 867 * code is currently used to compile two separate driver modules: sd and ssd.
868 868 * All function names need to be treated this way (even if declared static)
869 869 * in order to allow the debugger to resolve the names properly.
870 870 * It is anticipated that in the near future the ssd module will be obsoleted,
871 871 * at which time this ugliness should go away.
872 872 */
873 873 #define sd_log_trace ssd_log_trace
874 874 #define sd_log_info ssd_log_info
875 875 #define sd_log_err ssd_log_err
876 876 #define sdprobe ssdprobe
877 877 #define sdinfo ssdinfo
878 878 #define sd_prop_op ssd_prop_op
879 879 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
880 880 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
881 881 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
882 882 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
883 883 #define sd_scsi_target_lun_init ssd_scsi_target_lun_init
884 884 #define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
885 885 #define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
886 886 #define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
887 887 #define sd_spin_up_unit ssd_spin_up_unit
888 888 #define sd_enable_descr_sense ssd_enable_descr_sense
889 889 #define sd_reenable_dsense_task ssd_reenable_dsense_task
890 890 #define sd_set_mmc_caps ssd_set_mmc_caps
891 891 #define sd_read_unit_properties ssd_read_unit_properties
892 892 #define sd_process_sdconf_file ssd_process_sdconf_file
893 893 #define sd_process_sdconf_table ssd_process_sdconf_table
894 894 #define sd_sdconf_id_match ssd_sdconf_id_match
895 895 #define sd_blank_cmp ssd_blank_cmp
896 896 #define sd_chk_vers1_data ssd_chk_vers1_data
897 897 #define sd_set_vers1_properties ssd_set_vers1_properties
898 898 #define sd_check_solid_state ssd_check_solid_state
899 899 #define sd_check_emulation_mode ssd_check_emulation_mode
900 900
901 901 #define sd_get_physical_geometry ssd_get_physical_geometry
902 902 #define sd_get_virtual_geometry ssd_get_virtual_geometry
903 903 #define sd_update_block_info ssd_update_block_info
904 904 #define sd_register_devid ssd_register_devid
905 905 #define sd_get_devid ssd_get_devid
906 906 #define sd_create_devid ssd_create_devid
907 907 #define sd_write_deviceid ssd_write_deviceid
908 908 #define sd_check_vpd_page_support ssd_check_vpd_page_support
909 909 #define sd_setup_pm ssd_setup_pm
910 910 #define sd_create_pm_components ssd_create_pm_components
911 911 #define sd_ddi_suspend ssd_ddi_suspend
912 912 #define sd_ddi_resume ssd_ddi_resume
913 913 #define sd_pm_state_change ssd_pm_state_change
914 914 #define sdpower ssdpower
915 915 #define sdattach ssdattach
916 916 #define sddetach ssddetach
917 917 #define sd_unit_attach ssd_unit_attach
918 918 #define sd_unit_detach ssd_unit_detach
919 919 #define sd_set_unit_attributes ssd_set_unit_attributes
920 920 #define sd_create_errstats ssd_create_errstats
921 921 #define sd_set_errstats ssd_set_errstats
922 922 #define sd_set_pstats ssd_set_pstats
923 923 #define sddump ssddump
924 924 #define sd_scsi_poll ssd_scsi_poll
925 925 #define sd_send_polled_RQS ssd_send_polled_RQS
926 926 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll
927 927 #define sd_init_event_callbacks ssd_init_event_callbacks
928 928 #define sd_event_callback ssd_event_callback
929 929 #define sd_cache_control ssd_cache_control
930 930 #define sd_get_write_cache_enabled ssd_get_write_cache_enabled
931 931 #define sd_get_nv_sup ssd_get_nv_sup
932 932 #define sd_make_device ssd_make_device
933 933 #define sdopen ssdopen
934 934 #define sdclose ssdclose
935 935 #define sd_ready_and_valid ssd_ready_and_valid
936 936 #define sdmin ssdmin
937 937 #define sdread ssdread
938 938 #define sdwrite ssdwrite
939 939 #define sdaread ssdaread
940 940 #define sdawrite ssdawrite
941 941 #define sdstrategy ssdstrategy
942 942 #define sdioctl ssdioctl
943 943 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
944 944 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart
945 945 #define sd_checksum_iostart ssd_checksum_iostart
946 946 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
947 947 #define sd_pm_iostart ssd_pm_iostart
948 948 #define sd_core_iostart ssd_core_iostart
949 949 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
950 950 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone
951 951 #define sd_checksum_iodone ssd_checksum_iodone
952 952 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
953 953 #define sd_pm_iodone ssd_pm_iodone
954 954 #define sd_initpkt_for_buf ssd_initpkt_for_buf
955 955 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf
956 956 #define sd_setup_rw_pkt ssd_setup_rw_pkt
957 957 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
958 958 #define sd_buf_iodone ssd_buf_iodone
959 959 #define sd_uscsi_strategy ssd_uscsi_strategy
960 960 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
961 961 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
962 962 #define sd_uscsi_iodone ssd_uscsi_iodone
963 963 #define sd_xbuf_strategy ssd_xbuf_strategy
964 964 #define sd_xbuf_init ssd_xbuf_init
965 965 #define sd_pm_entry ssd_pm_entry
966 966 #define sd_pm_exit ssd_pm_exit
967 967
968 968 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
969 969 #define sd_pm_timeout_handler ssd_pm_timeout_handler
970 970
971 971 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq
972 972 #define sdintr ssdintr
973 973 #define sd_start_cmds ssd_start_cmds
974 974 #define sd_send_scsi_cmd ssd_send_scsi_cmd
975 975 #define sd_bioclone_alloc ssd_bioclone_alloc
976 976 #define sd_bioclone_free ssd_bioclone_free
977 977 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc
978 978 #define sd_shadow_buf_free ssd_shadow_buf_free
979 979 #define sd_print_transport_rejected_message \
980 980 ssd_print_transport_rejected_message
981 981 #define sd_retry_command ssd_retry_command
982 982 #define sd_set_retry_bp ssd_set_retry_bp
983 983 #define sd_send_request_sense_command ssd_send_request_sense_command
984 984 #define sd_start_retry_command ssd_start_retry_command
985 985 #define sd_start_direct_priority_command \
986 986 ssd_start_direct_priority_command
987 987 #define sd_return_failed_command ssd_return_failed_command
988 988 #define sd_return_failed_command_no_restart \
989 989 ssd_return_failed_command_no_restart
990 990 #define sd_return_command ssd_return_command
991 991 #define sd_sync_with_callback ssd_sync_with_callback
992 992 #define sdrunout ssdrunout
993 993 #define sd_mark_rqs_busy ssd_mark_rqs_busy
994 994 #define sd_mark_rqs_idle ssd_mark_rqs_idle
995 995 #define sd_reduce_throttle ssd_reduce_throttle
996 996 #define sd_restore_throttle ssd_restore_throttle
997 997 #define sd_print_incomplete_msg ssd_print_incomplete_msg
998 998 #define sd_init_cdb_limits ssd_init_cdb_limits
999 999 #define sd_pkt_status_good ssd_pkt_status_good
1000 1000 #define sd_pkt_status_check_condition ssd_pkt_status_check_condition
1001 1001 #define sd_pkt_status_busy ssd_pkt_status_busy
1002 1002 #define sd_pkt_status_reservation_conflict \
1003 1003 ssd_pkt_status_reservation_conflict
1004 1004 #define sd_pkt_status_qfull ssd_pkt_status_qfull
1005 1005 #define sd_handle_request_sense ssd_handle_request_sense
1006 1006 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense
1007 1007 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg
1008 1008 #define sd_validate_sense_data ssd_validate_sense_data
1009 1009 #define sd_decode_sense ssd_decode_sense
1010 1010 #define sd_print_sense_msg ssd_print_sense_msg
1011 1011 #define sd_sense_key_no_sense ssd_sense_key_no_sense
1012 1012 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
1013 1013 #define sd_sense_key_not_ready ssd_sense_key_not_ready
1014 1014 #define sd_sense_key_medium_or_hardware_error \
1015 1015 ssd_sense_key_medium_or_hardware_error
1016 1016 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request
1017 1017 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention
1018 1018 #define sd_sense_key_fail_command ssd_sense_key_fail_command
1019 1019 #define sd_sense_key_blank_check ssd_sense_key_blank_check
1020 1020 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command
1021 1021 #define sd_sense_key_default ssd_sense_key_default
1022 1022 #define sd_print_retry_msg ssd_print_retry_msg
1023 1023 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
1024 1024 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
1025 1025 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
1026 1026 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
1027 1027 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
1028 1028 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
1029 1029 #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
1030 1030 #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
1031 1031 #define sd_pkt_reason_default ssd_pkt_reason_default
1032 1032 #define sd_reset_target ssd_reset_target
1033 1033 #define sd_start_stop_unit_callback ssd_start_stop_unit_callback
1034 1034 #define sd_start_stop_unit_task ssd_start_stop_unit_task
1035 1035 #define sd_taskq_create ssd_taskq_create
1036 1036 #define sd_taskq_delete ssd_taskq_delete
1037 1037 #define sd_target_change_task ssd_target_change_task
1038 1038 #define sd_log_dev_status_event ssd_log_dev_status_event
1039 1039 #define sd_log_lun_expansion_event ssd_log_lun_expansion_event
1040 1040 #define sd_log_eject_request_event ssd_log_eject_request_event
1041 1041 #define sd_media_change_task ssd_media_change_task
1042 1042 #define sd_handle_mchange ssd_handle_mchange
1043 1043 #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
1044 1044 #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
1045 1045 #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
1046 1046 #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
/*
 * Map this sd_ name to its ssd_ equivalent in the ssd build, like every
 * other entry in this list.  This macro previously expanded to ITSELF
 * ("sd_send_scsi_feature_GET_CONFIGURATION"); a self-referential macro
 * is legal C (expansion simply stops) but performs no renaming, so this
 * one routine escaped the sd_ -> ssd_ namespace remapping and could
 * collide between the sd and ssd modules.
 */
#define	sd_send_scsi_feature_GET_CONFIGURATION \
		ssd_send_scsi_feature_GET_CONFIGURATION
1049 1049 #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
1050 1050 #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
1051 1051 #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
1052 1052 #define sd_send_scsi_PERSISTENT_RESERVE_IN \
1053 1053 ssd_send_scsi_PERSISTENT_RESERVE_IN
1054 1054 #define sd_send_scsi_PERSISTENT_RESERVE_OUT \
1055 1055 ssd_send_scsi_PERSISTENT_RESERVE_OUT
1056 1056 #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
1057 1057 #define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
1058 1058 ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
1059 1059 #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
1060 1060 #define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
1061 1061 #define sd_send_scsi_RDWR ssd_send_scsi_RDWR
1062 1062 #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
1063 1063 #define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
1064 1064 ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
1065 1065 #define sd_gesn_media_data_valid ssd_gesn_media_data_valid
1066 1066 #define sd_alloc_rqs ssd_alloc_rqs
1067 1067 #define sd_free_rqs ssd_free_rqs
1068 1068 #define sd_dump_memory ssd_dump_memory
1069 1069 #define sd_get_media_info_com ssd_get_media_info_com
1070 1070 #define sd_get_media_info ssd_get_media_info
1071 1071 #define sd_get_media_info_ext ssd_get_media_info_ext
1072 1072 #define sd_dkio_ctrl_info ssd_dkio_ctrl_info
1073 1073 #define sd_nvpair_str_decode ssd_nvpair_str_decode
1074 1074 #define sd_strtok_r ssd_strtok_r
1075 1075 #define sd_set_properties ssd_set_properties
1076 1076 #define sd_get_tunables_from_conf ssd_get_tunables_from_conf
1077 1077 #define sd_setup_next_xfer ssd_setup_next_xfer
1078 1078 #define sd_dkio_get_temp ssd_dkio_get_temp
1079 1079 #define sd_check_mhd ssd_check_mhd
1080 1080 #define sd_mhd_watch_cb ssd_mhd_watch_cb
1081 1081 #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
1082 1082 #define sd_sname ssd_sname
1083 1083 #define sd_mhd_resvd_recover ssd_mhd_resvd_recover
1084 1084 #define sd_resv_reclaim_thread ssd_resv_reclaim_thread
1085 1085 #define sd_take_ownership ssd_take_ownership
1086 1086 #define sd_reserve_release ssd_reserve_release
1087 1087 #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
1088 1088 #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
1089 1089 #define sd_persistent_reservation_in_read_keys \
1090 1090 ssd_persistent_reservation_in_read_keys
1091 1091 #define sd_persistent_reservation_in_read_resv \
1092 1092 ssd_persistent_reservation_in_read_resv
1093 1093 #define sd_mhdioc_takeown ssd_mhdioc_takeown
1094 1094 #define sd_mhdioc_failfast ssd_mhdioc_failfast
1095 1095 #define sd_mhdioc_release ssd_mhdioc_release
1096 1096 #define sd_mhdioc_register_devid ssd_mhdioc_register_devid
1097 1097 #define sd_mhdioc_inkeys ssd_mhdioc_inkeys
1098 1098 #define sd_mhdioc_inresv ssd_mhdioc_inresv
1099 1099 #define sr_change_blkmode ssr_change_blkmode
1100 1100 #define sr_change_speed ssr_change_speed
1101 1101 #define sr_atapi_change_speed ssr_atapi_change_speed
1102 1102 #define sr_pause_resume ssr_pause_resume
1103 1103 #define sr_play_msf ssr_play_msf
1104 1104 #define sr_play_trkind ssr_play_trkind
1105 1105 #define sr_read_all_subcodes ssr_read_all_subcodes
1106 1106 #define sr_read_subchannel ssr_read_subchannel
1107 1107 #define sr_read_tocentry ssr_read_tocentry
1108 1108 #define sr_read_tochdr ssr_read_tochdr
1109 1109 #define sr_read_cdda ssr_read_cdda
1110 1110 #define sr_read_cdxa ssr_read_cdxa
1111 1111 #define sr_read_mode1 ssr_read_mode1
1112 1112 #define sr_read_mode2 ssr_read_mode2
1113 1113 #define sr_read_cd_mode2 ssr_read_cd_mode2
1114 1114 #define sr_sector_mode ssr_sector_mode
1115 1115 #define sr_eject ssr_eject
1116 1116 #define sr_ejected ssr_ejected
1117 1117 #define sr_check_wp ssr_check_wp
1118 1118 #define sd_watch_request_submit ssd_watch_request_submit
1119 1119 #define sd_check_media ssd_check_media
1120 1120 #define sd_media_watch_cb ssd_media_watch_cb
1121 1121 #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
1122 1122 #define sr_volume_ctrl ssr_volume_ctrl
1123 1123 #define sr_read_sony_session_offset ssr_read_sony_session_offset
1124 1124 #define sd_log_page_supported ssd_log_page_supported
1125 1125 #define sd_check_for_writable_cd ssd_check_for_writable_cd
1126 1126 #define sd_wm_cache_constructor ssd_wm_cache_constructor
1127 1127 #define sd_wm_cache_destructor ssd_wm_cache_destructor
1128 1128 #define sd_range_lock ssd_range_lock
1129 1129 #define sd_get_range ssd_get_range
1130 1130 #define sd_free_inlist_wmap ssd_free_inlist_wmap
1131 1131 #define sd_range_unlock ssd_range_unlock
1132 1132 #define sd_read_modify_write_task ssd_read_modify_write_task
1133 1133 #define sddump_do_read_of_rmw ssddump_do_read_of_rmw
1134 1134
1135 1135 #define sd_iostart_chain ssd_iostart_chain
1136 1136 #define sd_iodone_chain ssd_iodone_chain
1137 1137 #define sd_initpkt_map ssd_initpkt_map
1138 1138 #define sd_destroypkt_map ssd_destroypkt_map
1139 1139 #define sd_chain_type_map ssd_chain_type_map
1140 1140 #define sd_chain_index_map ssd_chain_index_map
1141 1141
1142 1142 #define sd_failfast_flushctl ssd_failfast_flushctl
1143 1143 #define sd_failfast_flushq ssd_failfast_flushq
1144 1144 #define sd_failfast_flushq_callback ssd_failfast_flushq_callback
1145 1145
1146 1146 #define sd_is_lsi ssd_is_lsi
1147 1147 #define sd_tg_rdwr ssd_tg_rdwr
1148 1148 #define sd_tg_getinfo ssd_tg_getinfo
1149 1149 #define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler
1150 1150
1151 1151 #endif /* #if (defined(__fibre)) */
1152 1152
1153 1153
1154 1154 int _init(void);
1155 1155 int _fini(void);
1156 1156 int _info(struct modinfo *modinfop);
1157 1157
1158 1158 /*PRINTFLIKE3*/
1159 1159 static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1160 1160 /*PRINTFLIKE3*/
1161 1161 static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1162 1162 /*PRINTFLIKE3*/
1163 1163 static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1164 1164
1165 1165 static int sdprobe(dev_info_t *devi);
1166 1166 static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
1167 1167 void **result);
1168 1168 static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1169 1169 int mod_flags, char *name, caddr_t valuep, int *lengthp);
1170 1170
1171 1171 /*
1172 1172 * Smart probe for parallel scsi
1173 1173 */
1174 1174 static void sd_scsi_probe_cache_init(void);
1175 1175 static void sd_scsi_probe_cache_fini(void);
1176 1176 static void sd_scsi_clear_probe_cache(void);
1177 1177 static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());
1178 1178
1179 1179 /*
1180 1180 * Attached luns on target for parallel scsi
1181 1181 */
1182 1182 static void sd_scsi_target_lun_init(void);
1183 1183 static void sd_scsi_target_lun_fini(void);
1184 1184 static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
1185 1185 static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);
1186 1186
1187 1187 static int sd_spin_up_unit(sd_ssc_t *ssc);
1188 1188
1189 1189 /*
1190 1190 * Using sd_ssc_init to establish sd_ssc_t struct
1191 1191 * Using sd_ssc_send to send uscsi internal command
1192 1192 * Using sd_ssc_fini to free sd_ssc_t struct
1193 1193 */
1194 1194 static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
1195 1195 static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
1196 1196 int flag, enum uio_seg dataspace, int path_flag);
1197 1197 static void sd_ssc_fini(sd_ssc_t *ssc);
1198 1198
1199 1199 /*
1200 1200 * Using sd_ssc_assessment to set correct type-of-assessment
1201 1201 * Using sd_ssc_post to post ereport & system log
1202 1202 * sd_ssc_post will call sd_ssc_print to print system log
1203 1203 * sd_ssc_post will call sd_ssd_ereport_post to post ereport
1204 1204 */
1205 1205 static void sd_ssc_assessment(sd_ssc_t *ssc,
1206 1206 enum sd_type_assessment tp_assess);
1207 1207
1208 1208 static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
1209 1209 static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
1210 1210 static void sd_ssc_ereport_post(sd_ssc_t *ssc,
1211 1211 enum sd_driver_assessment drv_assess);
1212 1212
1213 1213 /*
1214 1214 * Using sd_ssc_set_info to mark an un-decodable-data error.
1215 1215 * Using sd_ssc_extract_info to transfer information from internal
1216 1216 * data structures to sd_ssc_t.
1217 1217 */
1218 1218 static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
1219 1219 const char *fmt, ...);
1220 1220 static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
1221 1221 struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
1222 1222
1223 1223 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1224 1224 enum uio_seg dataspace, int path_flag);
1225 1225
1226 1226 #ifdef _LP64
1227 1227 static void sd_enable_descr_sense(sd_ssc_t *ssc);
1228 1228 static void sd_reenable_dsense_task(void *arg);
1229 1229 #endif /* _LP64 */
1230 1230
1231 1231 static void sd_set_mmc_caps(sd_ssc_t *ssc);
1232 1232
1233 1233 static void sd_read_unit_properties(struct sd_lun *un);
1234 1234 static int sd_process_sdconf_file(struct sd_lun *un);
1235 1235 static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
1236 1236 static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
1237 1237 static void sd_set_properties(struct sd_lun *un, char *name, char *value);
1238 1238 static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
1239 1239 int *data_list, sd_tunables *values);
1240 1240 static void sd_process_sdconf_table(struct sd_lun *un);
1241 1241 static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
1242 1242 static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
1243 1243 static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
1244 1244 int list_len, char *dataname_ptr);
1245 1245 static void sd_set_vers1_properties(struct sd_lun *un, int flags,
1246 1246 sd_tunables *prop_list);
1247 1247
1248 1248 static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
1249 1249 int reservation_flag);
1250 1250 static int sd_get_devid(sd_ssc_t *ssc);
1251 1251 static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
1252 1252 static int sd_write_deviceid(sd_ssc_t *ssc);
1253 1253 static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
1254 1254 static int sd_check_vpd_page_support(sd_ssc_t *ssc);
1255 1255
1256 1256 static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
1257 1257 static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);
1258 1258
1259 1259 static int sd_ddi_suspend(dev_info_t *devi);
1260 1260 static int sd_ddi_resume(dev_info_t *devi);
1261 1261 static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
1262 1262 static int sdpower(dev_info_t *devi, int component, int level);
1263 1263
1264 1264 static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
1265 1265 static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
1266 1266 static int sd_unit_attach(dev_info_t *devi);
1267 1267 static int sd_unit_detach(dev_info_t *devi);
1268 1268
1269 1269 static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
1270 1270 static void sd_create_errstats(struct sd_lun *un, int instance);
1271 1271 static void sd_set_errstats(struct sd_lun *un);
1272 1272 static void sd_set_pstats(struct sd_lun *un);
1273 1273
1274 1274 static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
1275 1275 static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
1276 1276 static int sd_send_polled_RQS(struct sd_lun *un);
1277 1277 static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);
1278 1278
1279 1279 #if (defined(__fibre))
1280 1280 /*
1281 1281 * Event callbacks (photon)
1282 1282 */
1283 1283 static void sd_init_event_callbacks(struct sd_lun *un);
1284 1284 static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
1285 1285 #endif
1286 1286
1287 1287 /*
1288 1288 * Defines for sd_cache_control
1289 1289 */
1290 1290
1291 1291 #define SD_CACHE_ENABLE 1
1292 1292 #define SD_CACHE_DISABLE 0
1293 1293 #define SD_CACHE_NOCHANGE -1
1294 1294
1295 1295 static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
1296 1296 static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
1297 1297 static void sd_get_nv_sup(sd_ssc_t *ssc);
1298 1298 static dev_t sd_make_device(dev_info_t *devi);
1299 1299 static void sd_check_solid_state(sd_ssc_t *ssc);
1300 1300 static void sd_check_emulation_mode(sd_ssc_t *ssc);
1301 1301 static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
1302 1302 uint64_t capacity);
1303 1303
1304 1304 /*
1305 1305 * Driver entry point functions.
1306 1306 */
1307 1307 static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
1308 1308 static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
1309 1309 static int sd_ready_and_valid(sd_ssc_t *ssc, int part);
1310 1310
1311 1311 static void sdmin(struct buf *bp);
1312 1312 static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
1313 1313 static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
1314 1314 static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1315 1315 static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1316 1316
1317 1317 static int sdstrategy(struct buf *bp);
1318 1318 static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
1319 1319
1320 1320 /*
1321 1321 * Function prototypes for layering functions in the iostart chain.
1322 1322 */
1323 1323 static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
1324 1324 struct buf *bp);
1325 1325 static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
1326 1326 struct buf *bp);
1327 1327 static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
1328 1328 static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
1329 1329 struct buf *bp);
1330 1330 static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
1331 1331 static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);
1332 1332
1333 1333 /*
1334 1334 * Function prototypes for layering functions in the iodone chain.
1335 1335 */
1336 1336 static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
1337 1337 static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
1338 1338 static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
1339 1339 struct buf *bp);
1340 1340 static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
1341 1341 struct buf *bp);
1342 1342 static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
1343 1343 static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
1344 1344 struct buf *bp);
1345 1345 static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
1346 1346
1347 1347 /*
1348 1348 * Prototypes for functions to support buf(9S) based IO.
1349 1349 */
1350 1350 static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
1351 1351 static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
1352 1352 static void sd_destroypkt_for_buf(struct buf *);
1353 1353 static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
1354 1354 struct buf *bp, int flags,
1355 1355 int (*callback)(caddr_t), caddr_t callback_arg,
1356 1356 diskaddr_t lba, uint32_t blockcount);
1357 1357 static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
1358 1358 struct buf *bp, diskaddr_t lba, uint32_t blockcount);
1359 1359
1360 1360 /*
1361 1361 * Prototypes for functions to support USCSI IO.
1362 1362 */
1363 1363 static int sd_uscsi_strategy(struct buf *bp);
1364 1364 static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
1365 1365 static void sd_destroypkt_for_uscsi(struct buf *);
1366 1366
1367 1367 static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
1368 1368 uchar_t chain_type, void *pktinfop);
1369 1369
1370 1370 static int sd_pm_entry(struct sd_lun *un);
1371 1371 static void sd_pm_exit(struct sd_lun *un);
1372 1372
1373 1373 static void sd_pm_idletimeout_handler(void *arg);
1374 1374
1375 1375 /*
1376 1376 * sd_core internal functions (used at the sd_core_io layer).
1377 1377 */
1378 1378 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
1379 1379 static void sdintr(struct scsi_pkt *pktp);
1380 1380 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);
1381 1381
1382 1382 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1383 1383 enum uio_seg dataspace, int path_flag);
1384 1384
1385 1385 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
1386 1386 daddr_t blkno, int (*func)(struct buf *));
1387 1387 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
1388 1388 uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
1389 1389 static void sd_bioclone_free(struct buf *bp);
1390 1390 static void sd_shadow_buf_free(struct buf *bp);
1391 1391
1392 1392 static void sd_print_transport_rejected_message(struct sd_lun *un,
1393 1393 struct sd_xbuf *xp, int code);
1394 1394 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
1395 1395 void *arg, int code);
1396 1396 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
1397 1397 void *arg, int code);
1398 1398 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
1399 1399 void *arg, int code);
1400 1400
1401 1401 static void sd_retry_command(struct sd_lun *un, struct buf *bp,
1402 1402 int retry_check_flag,
1403 1403 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
1404 1404 int c),
1405 1405 void *user_arg, int failure_code, clock_t retry_delay,
1406 1406 void (*statp)(kstat_io_t *));
1407 1407
1408 1408 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
1409 1409 clock_t retry_delay, void (*statp)(kstat_io_t *));
1410 1410
1411 1411 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
1412 1412 struct scsi_pkt *pktp);
1413 1413 static void sd_start_retry_command(void *arg);
1414 1414 static void sd_start_direct_priority_command(void *arg);
1415 1415 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
1416 1416 int errcode);
1417 1417 static void sd_return_failed_command_no_restart(struct sd_lun *un,
1418 1418 struct buf *bp, int errcode);
1419 1419 static void sd_return_command(struct sd_lun *un, struct buf *bp);
1420 1420 static void sd_sync_with_callback(struct sd_lun *un);
1421 1421 static int sdrunout(caddr_t arg);
1422 1422
1423 1423 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
1424 1424 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);
1425 1425
1426 1426 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
1427 1427 static void sd_restore_throttle(void *arg);
1428 1428
1429 1429 static void sd_init_cdb_limits(struct sd_lun *un);
1430 1430
1431 1431 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
1432 1432 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1433 1433
1434 1434 /*
1435 1435 * Error handling functions
1436 1436 */
1437 1437 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
1438 1438 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1439 1439 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
1440 1440 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1441 1441 static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
1442 1442 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1443 1443 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
1444 1444 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1445 1445
1446 1446 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
1447 1447 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1448 1448 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
1449 1449 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1450 1450 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
1451 1451 struct sd_xbuf *xp, size_t actual_len);
1452 1452 static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
1453 1453 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1454 1454
1455 1455 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
1456 1456 void *arg, int code);
1457 1457
1458 1458 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
1459 1459 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1460 1460 static void sd_sense_key_recoverable_error(struct sd_lun *un,
1461 1461 uint8_t *sense_datap,
1462 1462 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1463 1463 static void sd_sense_key_not_ready(struct sd_lun *un,
1464 1464 uint8_t *sense_datap,
1465 1465 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1466 1466 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
1467 1467 uint8_t *sense_datap,
1468 1468 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1469 1469 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
1470 1470 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1471 1471 static void sd_sense_key_unit_attention(struct sd_lun *un,
1472 1472 uint8_t *sense_datap,
1473 1473 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1474 1474 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
1475 1475 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1476 1476 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
1477 1477 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1478 1478 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
1479 1479 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1480 1480 static void sd_sense_key_default(struct sd_lun *un,
1481 1481 uint8_t *sense_datap,
1482 1482 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1483 1483
1484 1484 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
1485 1485 void *arg, int flag);
1486 1486
1487 1487 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
1488 1488 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1489 1489 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
1490 1490 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1491 1491 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
1492 1492 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1493 1493 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
1494 1494 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1495 1495 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
1496 1496 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1497 1497 static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
1498 1498 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1499 1499 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
1500 1500 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1501 1501 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
1502 1502 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1503 1503
1504 1504 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);
1505 1505
1506 1506 static void sd_start_stop_unit_callback(void *arg);
1507 1507 static void sd_start_stop_unit_task(void *arg);
1508 1508
1509 1509 static void sd_taskq_create(void);
1510 1510 static void sd_taskq_delete(void);
1511 1511 static void sd_target_change_task(void *arg);
1512 1512 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag);
1513 1513 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
1514 1514 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag);
1515 1515 static void sd_media_change_task(void *arg);
1516 1516
1517 1517 static int sd_handle_mchange(struct sd_lun *un);
1518 1518 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
1519 1519 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
1520 1520 uint32_t *lbap, int path_flag);
1521 1521 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
1522 1522 uint32_t *lbap, uint32_t *psp, int path_flag);
1523 1523 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag,
1524 1524 int flag, int path_flag);
1525 1525 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr,
1526 1526 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
1527 1527 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag);
1528 1528 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc,
1529 1529 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
1530 1530 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc,
1531 1531 uchar_t usr_cmd, uchar_t *usr_bufp);
1532 1532 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
1533 1533 struct dk_callback *dkc);
1534 1534 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
1535 1535 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc,
1536 1536 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1537 1537 uchar_t *bufaddr, uint_t buflen, int path_flag);
1538 1538 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
1539 1539 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1540 1540 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
1541 1541 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize,
1542 1542 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
1543 1543 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize,
1544 1544 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
1545 1545 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
1546 1546 size_t buflen, daddr_t start_block, int path_flag);
1547 1547 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \
1548 1548 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \
1549 1549 path_flag)
1550 1550 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\
1551 1551 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\
1552 1552 path_flag)
1553 1553
1554 1554 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr,
1555 1555 uint16_t buflen, uchar_t page_code, uchar_t page_control,
1556 1556 uint16_t param_ptr, int path_flag);
1557 1557 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc,
1558 1558 uchar_t *bufaddr, size_t buflen, uchar_t class_req);
1559 1559 static boolean_t sd_gesn_media_data_valid(uchar_t *data);
1560 1560
1561 1561 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
1562 1562 static void sd_free_rqs(struct sd_lun *un);
1563 1563
1564 1564 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
1565 1565 uchar_t *data, int len, int fmt);
1566 1566 static void sd_panic_for_res_conflict(struct sd_lun *un);
1567 1567
1568 1568 /*
1569 1569 * Disk Ioctl Function Prototypes
1570 1570 */
1571 1571 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
1572 1572 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag);
1573 1573 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
1574 1574 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
1575 1575
1576 1576 /*
1577 1577 * Multi-host Ioctl Prototypes
1578 1578 */
1579 1579 static int sd_check_mhd(dev_t dev, int interval);
1580 1580 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1581 1581 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
1582 1582 static char *sd_sname(uchar_t status);
1583 1583 static void sd_mhd_resvd_recover(void *arg);
1584 1584 static void sd_resv_reclaim_thread();
1585 1585 static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
1586 1586 static int sd_reserve_release(dev_t dev, int cmd);
1587 1587 static void sd_rmv_resv_reclaim_req(dev_t dev);
1588 1588 static void sd_mhd_reset_notify_cb(caddr_t arg);
1589 1589 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
1590 1590 mhioc_inkeys_t *usrp, int flag);
1591 1591 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
1592 1592 mhioc_inresvs_t *usrp, int flag);
1593 1593 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
1594 1594 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
1595 1595 static int sd_mhdioc_release(dev_t dev);
1596 1596 static int sd_mhdioc_register_devid(dev_t dev);
1597 1597 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
1598 1598 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);
1599 1599
1600 1600 /*
1601 1601 * SCSI removable prototypes
1602 1602 */
1603 1603 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
1604 1604 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1605 1605 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1606 1606 static int sr_pause_resume(dev_t dev, int mode);
1607 1607 static int sr_play_msf(dev_t dev, caddr_t data, int flag);
1608 1608 static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
1609 1609 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
1610 1610 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
1611 1611 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
1612 1612 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
1613 1613 static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
1614 1614 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
1615 1615 static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
1616 1616 static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
1617 1617 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
1618 1618 static int sr_sector_mode(dev_t dev, uint32_t blksize);
1619 1619 static int sr_eject(dev_t dev);
1620 1620 static void sr_ejected(register struct sd_lun *un);
1621 1621 static int sr_check_wp(dev_t dev);
1622 1622 static opaque_t sd_watch_request_submit(struct sd_lun *un);
1623 1623 static int sd_check_media(dev_t dev, enum dkio_state state);
1624 1624 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1625 1625 static void sd_delayed_cv_broadcast(void *arg);
1626 1626 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
1627 1627 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
1628 1628
1629 1629 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);
1630 1630
1631 1631 /*
1632 1632 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
1633 1633 */
1634 1634 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
1635 1635 static int sd_wm_cache_constructor(void *wm, void *un, int flags);
1636 1636 static void sd_wm_cache_destructor(void *wm, void *un);
1637 1637 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
1638 1638 daddr_t endb, ushort_t typ);
1639 1639 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
1640 1640 daddr_t endb);
1641 1641 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
1642 1642 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
1643 1643 static void sd_read_modify_write_task(void * arg);
1644 1644 static int
1645 1645 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
1646 1646 struct buf **bpp);
1647 1647
1648 1648
1649 1649 /*
1650 1650 * Function prototypes for failfast support.
1651 1651 */
1652 1652 static void sd_failfast_flushq(struct sd_lun *un);
1653 1653 static int sd_failfast_flushq_callback(struct buf *bp);
1654 1654
1655 1655 /*
1656 1656 * Function prototypes to check for lsi devices
1657 1657 */
1658 1658 static void sd_is_lsi(struct sd_lun *un);
1659 1659
1660 1660 /*
1661 1661 * Function prototypes for partial DMA support
1662 1662 */
1663 1663 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
1664 1664 struct scsi_pkt *pkt, struct sd_xbuf *xp);
1665 1665
1666 1666
1667 1667 /* Function prototypes for cmlb */
1668 1668 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
1669 1669 diskaddr_t start_block, size_t reqlength, void *tg_cookie);
1670 1670
1671 1671 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);
1672 1672
1673 1673 /*
1674 1674 * For printing RMW warning message timely
1675 1675 */
1676 1676 static void sd_rmw_msg_print_handler(void *arg);
1677 1677
1678 1678 /*
1679 1679 * Constants for failfast support:
1680 1680 *
1681 1681 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
1682 1682 * failfast processing being performed.
1683 1683 *
1684 1684 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
1685 1685 * failfast processing on all bufs with B_FAILFAST set.
1686 1686 */
1687 1687
1688 1688 #define SD_FAILFAST_INACTIVE 0
1689 1689 #define SD_FAILFAST_ACTIVE 1
1690 1690
1691 1691 /*
1692 1692 * Bitmask to control behavior of buf(9S) flushes when a transition to
1693 1693 * the failfast state occurs. Optional bits include:
1694 1694 *
1695 1695 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
1696 1696 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
1697 1697 * be flushed.
1698 1698 *
1699 1699 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
1700 1700 * driver, in addition to the regular wait queue. This includes the xbuf
1701 1701 * queues. When clear, only the driver's wait queue will be flushed.
1702 1702 */
1703 1703 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
1704 1704 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
1705 1705
1706 1706 /*
1707 1707 * The default behavior is to only flush bufs that have B_FAILFAST set, but
1708 1708 * to flush all queues within the driver.
1709 1709 */
1710 1710 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
1711 1711
1712 1712
1713 1713 /*
1714 1714 * SD Testing Fault Injection
1715 1715 */
1716 1716 #ifdef SD_FAULT_INJECTION
1717 1717 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1718 1718 static void sd_faultinjection(struct scsi_pkt *pktp);
1719 1719 static void sd_injection_log(char *buf, struct sd_lun *un);
1720 1720 #endif
1721 1721
1722 1722 /*
1723 1723 * Device driver ops vector
1724 1724 */
1725 1725 static struct cb_ops sd_cb_ops = {
1726 1726 sdopen, /* open */
1727 1727 sdclose, /* close */
1728 1728 sdstrategy, /* strategy */
1729 1729 nodev, /* print */
1730 1730 sddump, /* dump */
1731 1731 sdread, /* read */
1732 1732 sdwrite, /* write */
1733 1733 sdioctl, /* ioctl */
1734 1734 nodev, /* devmap */
1735 1735 nodev, /* mmap */
1736 1736 nodev, /* segmap */
1737 1737 nochpoll, /* poll */
1738 1738 sd_prop_op, /* cb_prop_op */
1739 1739 0, /* streamtab */
1740 1740 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */
1741 1741 CB_REV, /* cb_rev */
1742 1742 sdaread, /* async I/O read entry point */
1743 1743 sdawrite /* async I/O write entry point */
1744 1744 };
1745 1745
1746 1746 struct dev_ops sd_ops = {
1747 1747 DEVO_REV, /* devo_rev, */
1748 1748 0, /* refcnt */
1749 1749 sdinfo, /* info */
1750 1750 nulldev, /* identify */
1751 1751 sdprobe, /* probe */
1752 1752 sdattach, /* attach */
1753 1753 sddetach, /* detach */
1754 1754 nodev, /* reset */
1755 1755 &sd_cb_ops, /* driver operations */
1756 1756 NULL, /* bus operations */
1757 1757 sdpower, /* power */
1758 1758 ddi_quiesce_not_needed, /* quiesce */
1759 1759 };
1760 1760
1761 1761 /*
1762 1762 * This is the loadable module wrapper.
1763 1763 */
1764 1764 #include <sys/modctl.h>
1765 1765
1766 1766 #ifndef XPV_HVM_DRIVER
1767 1767 static struct modldrv modldrv = {
1768 1768 &mod_driverops, /* Type of module. This one is a driver */
1769 1769 SD_MODULE_NAME, /* Module name. */
1770 1770 &sd_ops /* driver ops */
1771 1771 };
1772 1772
1773 1773 static struct modlinkage modlinkage = {
1774 1774 MODREV_1, &modldrv, NULL
1775 1775 };
1776 1776
1777 1777 #else /* XPV_HVM_DRIVER */
1778 1778 static struct modlmisc modlmisc = {
1779 1779 &mod_miscops, /* Type of module. This one is a misc */
1780 1780 "HVM " SD_MODULE_NAME, /* Module name. */
1781 1781 };
1782 1782
1783 1783 static struct modlinkage modlinkage = {
1784 1784 MODREV_1, &modlmisc, NULL
1785 1785 };
1786 1786
1787 1787 #endif /* XPV_HVM_DRIVER */
1788 1788
1789 1789 static cmlb_tg_ops_t sd_tgops = {
1790 1790 TG_DK_OPS_VERSION_1,
1791 1791 sd_tg_rdwr,
1792 1792 sd_tg_getinfo
1793 1793 };
1794 1794
1795 1795 static struct scsi_asq_key_strings sd_additional_codes[] = {
1796 1796 0x81, 0, "Logical Unit is Reserved",
1797 1797 0x85, 0, "Audio Address Not Valid",
1798 1798 0xb6, 0, "Media Load Mechanism Failed",
1799 1799 0xB9, 0, "Audio Play Operation Aborted",
1800 1800 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1801 1801 0x53, 2, "Medium removal prevented",
1802 1802 0x6f, 0, "Authentication failed during key exchange",
1803 1803 0x6f, 1, "Key not present",
1804 1804 0x6f, 2, "Key not established",
1805 1805 0x6f, 3, "Read without proper authentication",
1806 1806 0x6f, 4, "Mismatched region to this logical unit",
1807 1807 0x6f, 5, "Region reset count error",
1808 1808 0xffff, 0x0, NULL
1809 1809 };
1810 1810
1811 1811
1812 1812 /*
1813 1813 * Struct for passing printing information for sense data messages
1814 1814 */
1815 1815 struct sd_sense_info {
1816 1816 	int	ssi_severity;	/* message severity level -- consumed by sd_print_sense_msg(); confirm exact semantics at callers */
1817 1817 	int	ssi_pfa_flag;	/* presumably non-zero for predictive-failure-analysis messages -- TODO confirm */
1818 1818 };
1819 1819
1820 1820 /*
1821 1821 * Table of function pointers for iostart-side routines. Separate "chains"
1822 1822 * of layered function calls are formed by placing the function pointers
1823 1823 * sequentially in the desired order. Functions are called according to an
1824 1824 * incrementing table index ordering. The last function in each chain must
1825 1825 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1826 1826 * in the sd_iodone_chain[] array.
1827 1827 *
1828 1828 * Note: It may seem more natural to organize both the iostart and iodone
1829 1829 * functions together, into an array of structures (or some similar
1830 1830 * organization) with a common index, rather than two separate arrays which
1831 1831 * must be maintained in synchronization. The purpose of this division is
1832 1832 * to achieve improved performance: individual arrays allows for more
1833 1833 * effective cache line utilization on certain platforms.
1834 1834 */
1835 1835
1836 1836 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
1837 1837
1838 1838
1839 1839 static sd_chain_t sd_iostart_chain[] = {
1840 1840
1841 1841 /* Chain for buf IO for disk drive targets (PM enabled) */
1842 1842 sd_mapblockaddr_iostart, /* Index: 0 */
1843 1843 sd_pm_iostart, /* Index: 1 */
1844 1844 sd_core_iostart, /* Index: 2 */
1845 1845
1846 1846 /* Chain for buf IO for disk drive targets (PM disabled) */
1847 1847 sd_mapblockaddr_iostart, /* Index: 3 */
1848 1848 sd_core_iostart, /* Index: 4 */
1849 1849
1850 1850 /*
1851 1851 * Chain for buf IO for removable-media or large sector size
1852 1852 * disk drive targets with RMW needed (PM enabled)
1853 1853 */
1854 1854 sd_mapblockaddr_iostart, /* Index: 5 */
1855 1855 sd_mapblocksize_iostart, /* Index: 6 */
1856 1856 sd_pm_iostart, /* Index: 7 */
1857 1857 sd_core_iostart, /* Index: 8 */
1858 1858
1859 1859 /*
1860 1860 * Chain for buf IO for removable-media or large sector size
1861 1861 * disk drive targets with RMW needed (PM disabled)
1862 1862 */
1863 1863 sd_mapblockaddr_iostart, /* Index: 9 */
1864 1864 sd_mapblocksize_iostart, /* Index: 10 */
1865 1865 sd_core_iostart, /* Index: 11 */
1866 1866
1867 1867 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1868 1868 sd_mapblockaddr_iostart, /* Index: 12 */
1869 1869 sd_checksum_iostart, /* Index: 13 */
1870 1870 sd_pm_iostart, /* Index: 14 */
1871 1871 sd_core_iostart, /* Index: 15 */
1872 1872
1873 1873 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1874 1874 sd_mapblockaddr_iostart, /* Index: 16 */
1875 1875 sd_checksum_iostart, /* Index: 17 */
1876 1876 sd_core_iostart, /* Index: 18 */
1877 1877
1878 1878 /* Chain for USCSI commands (all targets) */
1879 1879 sd_pm_iostart, /* Index: 19 */
1880 1880 sd_core_iostart, /* Index: 20 */
1881 1881
1882 1882 /* Chain for checksumming USCSI commands (all targets) */
1883 1883 sd_checksum_uscsi_iostart, /* Index: 21 */
1884 1884 sd_pm_iostart, /* Index: 22 */
1885 1885 sd_core_iostart, /* Index: 23 */
1886 1886
1887 1887 /* Chain for "direct" USCSI commands (all targets) */
1888 1888 sd_core_iostart, /* Index: 24 */
1889 1889
1890 1890 /* Chain for "direct priority" USCSI commands (all targets) */
1891 1891 sd_core_iostart, /* Index: 25 */
1892 1892
1893 1893 /*
1894 1894 * Chain for buf IO for large sector size disk drive targets
1895 1895 * with RMW needed with checksumming (PM enabled)
1896 1896 */
1897 1897 sd_mapblockaddr_iostart, /* Index: 26 */
1898 1898 sd_mapblocksize_iostart, /* Index: 27 */
1899 1899 sd_checksum_iostart, /* Index: 28 */
1900 1900 sd_pm_iostart, /* Index: 29 */
1901 1901 sd_core_iostart, /* Index: 30 */
1902 1902
1903 1903 /*
1904 1904 * Chain for buf IO for large sector size disk drive targets
1905 1905 * with RMW needed with checksumming (PM disabled)
1906 1906 */
1907 1907 sd_mapblockaddr_iostart, /* Index: 31 */
1908 1908 sd_mapblocksize_iostart, /* Index: 32 */
1909 1909 sd_checksum_iostart, /* Index: 33 */
1910 1910 sd_core_iostart, /* Index: 34 */
1911 1911
1912 1912 };
1913 1913
1914 1914 /*
1915 1915 * Macros to locate the first function of each iostart chain in the
1916 1916 * sd_iostart_chain[] array. These are located by the index in the array.
1917 1917 */
1918 1918 #define SD_CHAIN_DISK_IOSTART 0
1919 1919 #define SD_CHAIN_DISK_IOSTART_NO_PM 3
1920 1920 #define SD_CHAIN_MSS_DISK_IOSTART 5
1921 1921 #define SD_CHAIN_RMMEDIA_IOSTART 5
1922 1922 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
1923 1923 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
1924 1924 #define SD_CHAIN_CHKSUM_IOSTART 12
1925 1925 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
1926 1926 #define SD_CHAIN_USCSI_CMD_IOSTART 19
1927 1927 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
1928 1928 #define SD_CHAIN_DIRECT_CMD_IOSTART 24
1929 1929 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25
1930 1930 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26
1931 1931 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
1932 1932
1933 1933
1934 1934 /*
1935 1935 * Table of function pointers for the iodone-side routines for the driver-
1936 1936 * internal layering mechanism. The calling sequence for iodone routines
1937 1937 * uses a decrementing table index, so the last routine called in a chain
1938 1938 * must be at the lowest array index location for that chain. The last
1939 1939 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1940 1940 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1941 1941 * of the functions in an iodone side chain must correspond to the ordering
1942 1942 * of the iostart routines for that chain. Note that there is no iodone
1943 1943 * side routine that corresponds to sd_core_iostart(), so there is no
1944 1944 * entry in the table for this.
1945 1945 */
1946 1946
1947 1947 static sd_chain_t sd_iodone_chain[] = {
1948 1948
1949 1949 /* Chain for buf IO for disk drive targets (PM enabled) */
1950 1950 sd_buf_iodone, /* Index: 0 */
1951 1951 sd_mapblockaddr_iodone, /* Index: 1 */
1952 1952 sd_pm_iodone, /* Index: 2 */
1953 1953
1954 1954 /* Chain for buf IO for disk drive targets (PM disabled) */
1955 1955 sd_buf_iodone, /* Index: 3 */
1956 1956 sd_mapblockaddr_iodone, /* Index: 4 */
1957 1957
1958 1958 /*
1959 1959 * Chain for buf IO for removable-media or large sector size
1960 1960 * disk drive targets with RMW needed (PM enabled)
1961 1961 */
1962 1962 sd_buf_iodone, /* Index: 5 */
1963 1963 sd_mapblockaddr_iodone, /* Index: 6 */
1964 1964 sd_mapblocksize_iodone, /* Index: 7 */
1965 1965 sd_pm_iodone, /* Index: 8 */
1966 1966
1967 1967 /*
1968 1968 * Chain for buf IO for removable-media or large sector size
1969 1969 * disk drive targets with RMW needed (PM disabled)
1970 1970 */
1971 1971 sd_buf_iodone, /* Index: 9 */
1972 1972 sd_mapblockaddr_iodone, /* Index: 10 */
1973 1973 sd_mapblocksize_iodone, /* Index: 11 */
1974 1974
1975 1975 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1976 1976 sd_buf_iodone, /* Index: 12 */
1977 1977 sd_mapblockaddr_iodone, /* Index: 13 */
1978 1978 sd_checksum_iodone, /* Index: 14 */
1979 1979 sd_pm_iodone, /* Index: 15 */
1980 1980
1981 1981 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1982 1982 sd_buf_iodone, /* Index: 16 */
1983 1983 sd_mapblockaddr_iodone, /* Index: 17 */
1984 1984 sd_checksum_iodone, /* Index: 18 */
1985 1985
1986 1986 /* Chain for USCSI commands (non-checksum targets) */
1987 1987 sd_uscsi_iodone, /* Index: 19 */
1988 1988 sd_pm_iodone, /* Index: 20 */
1989 1989
1990 1990 /* Chain for USCSI commands (checksum targets) */
1991 1991 sd_uscsi_iodone, /* Index: 21 */
1992 1992 sd_checksum_uscsi_iodone, /* Index: 22 */
1993 1993 sd_pm_iodone, /* Index: 23 */
1994 1994
1995 1995 /* Chain for "direct" USCSI commands (all targets) */
1996 1996 sd_uscsi_iodone, /* Index: 24 */
1997 1997
1998 1998 /* Chain for "direct priority" USCSI commands (all targets) */
1999 1999 sd_uscsi_iodone, /* Index: 25 */
2000 2000
2001 2001 /*
2002 2002 * Chain for buf IO for large sector size disk drive targets
2003 2003 * with checksumming (PM enabled)
2004 2004 */
2005 2005 sd_buf_iodone, /* Index: 26 */
2006 2006 sd_mapblockaddr_iodone, /* Index: 27 */
2007 2007 sd_mapblocksize_iodone, /* Index: 28 */
2008 2008 sd_checksum_iodone, /* Index: 29 */
2009 2009 sd_pm_iodone, /* Index: 30 */
2010 2010
2011 2011 /*
2012 2012 * Chain for buf IO for large sector size disk drive targets
2013 2013 * with checksumming (PM disabled)
2014 2014 */
2015 2015 sd_buf_iodone, /* Index: 31 */
2016 2016 sd_mapblockaddr_iodone, /* Index: 32 */
2017 2017 sd_mapblocksize_iodone, /* Index: 33 */
2018 2018 sd_checksum_iodone, /* Index: 34 */
2019 2019 };
2020 2020
2021 2021
2022 2022 /*
2023 2023 * Macros to locate the "first" function in the sd_iodone_chain[] array for
2024 2024 * each iodone-side chain. These are located by the array index, but as the
2025 2025 * iodone side functions are called in a decrementing-index order, the
2026 2026 * highest index number in each chain must be specified (as these correspond
2027 2027 * to the first function in the iodone chain that will be called by the core
2028 2028 * at IO completion time).
2029 2029 */
2030 2030
2031 2031 #define SD_CHAIN_DISK_IODONE 2
2032 2032 #define SD_CHAIN_DISK_IODONE_NO_PM 4
2033 2033 #define SD_CHAIN_RMMEDIA_IODONE 8
2034 2034 #define SD_CHAIN_MSS_DISK_IODONE 8
2035 2035 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
2036 2036 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
2037 2037 #define SD_CHAIN_CHKSUM_IODONE 15
2038 2038 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
2039 2039 #define SD_CHAIN_USCSI_CMD_IODONE 20
2040 2040 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22 /* NOTE(review): the checksum-USCSI chain's highest sd_iodone_chain[] entry is sd_pm_iodone at actual array index 23; starting at 22 would skip sd_pm_iodone -- confirm this is intended */
2041 2041 #define SD_CHAIN_DIRECT_CMD_IODONE 24
2042 2042 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
2043 2043 #define SD_CHAIN_MSS_CHKSUM_IODONE 30
2044 2044 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
2045 2045
2046 2046
2047 2047
2048 2048 /*
2049 2049 * Array to map a layering chain index to the appropriate initpkt routine.
2050 2050 * The redundant entries are present so that the index used for accessing
2051 2051 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2052 2052 * with this table as well.
2053 2053 */
2054 2054 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
2055 2055
2056 2056 static sd_initpkt_t sd_initpkt_map[] = {
2057 2057
2058 2058 /* Chain for buf IO for disk drive targets (PM enabled) */
2059 2059 sd_initpkt_for_buf, /* Index: 0 */
2060 2060 sd_initpkt_for_buf, /* Index: 1 */
2061 2061 sd_initpkt_for_buf, /* Index: 2 */
2062 2062
2063 2063 /* Chain for buf IO for disk drive targets (PM disabled) */
2064 2064 sd_initpkt_for_buf, /* Index: 3 */
2065 2065 sd_initpkt_for_buf, /* Index: 4 */
2066 2066
2067 2067 /*
2068 2068 * Chain for buf IO for removable-media or large sector size
2069 2069 * disk drive targets (PM enabled)
2070 2070 */
2071 2071 sd_initpkt_for_buf, /* Index: 5 */
2072 2072 sd_initpkt_for_buf, /* Index: 6 */
2073 2073 sd_initpkt_for_buf, /* Index: 7 */
2074 2074 sd_initpkt_for_buf, /* Index: 8 */
2075 2075
2076 2076 /*
2077 2077 * Chain for buf IO for removable-media or large sector size
2078 2078 * disk drive targets (PM disabled)
2079 2079 */
2080 2080 sd_initpkt_for_buf, /* Index: 9 */
2081 2081 sd_initpkt_for_buf, /* Index: 10 */
2082 2082 sd_initpkt_for_buf, /* Index: 11 */
2083 2083
2084 2084 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2085 2085 sd_initpkt_for_buf, /* Index: 12 */
2086 2086 sd_initpkt_for_buf, /* Index: 13 */
2087 2087 sd_initpkt_for_buf, /* Index: 14 */
2088 2088 sd_initpkt_for_buf, /* Index: 15 */
2089 2089
2090 2090 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2091 2091 sd_initpkt_for_buf, /* Index: 16 */
2092 2092 sd_initpkt_for_buf, /* Index: 17 */
2093 2093 sd_initpkt_for_buf, /* Index: 18 */
2094 2094
2095 2095 /* Chain for USCSI commands (non-checksum targets) */
2096 2096 sd_initpkt_for_uscsi, /* Index: 19 */
2097 2097 sd_initpkt_for_uscsi, /* Index: 20 */
2098 2098
2099 2099 /* Chain for USCSI commands (checksum targets) */
2100 2100 sd_initpkt_for_uscsi, /* Index: 21 */
2101 2101 sd_initpkt_for_uscsi, /* Index: 22 */
2102 2102 sd_initpkt_for_uscsi, /* Index: 23 */
2103 2103
2104 2104 /* Chain for "direct" USCSI commands (all targets) */
2105 2105 sd_initpkt_for_uscsi, /* Index: 24 */
2106 2106
2107 2107 /* Chain for "direct priority" USCSI commands (all targets) */
2108 2108 sd_initpkt_for_uscsi, /* Index: 25 */
2109 2109
2110 2110 /*
2111 2111 * Chain for buf IO for large sector size disk drive targets
2112 2112 * with checksumming (PM enabled)
2113 2113 */
2114 2114 sd_initpkt_for_buf, /* Index: 26 */
2115 2115 sd_initpkt_for_buf, /* Index: 27 */
2116 2116 sd_initpkt_for_buf, /* Index: 28 */
2117 2117 sd_initpkt_for_buf, /* Index: 29 */
2118 2118 sd_initpkt_for_buf, /* Index: 30 */
2119 2119
2120 2120 /*
2121 2121 * Chain for buf IO for large sector size disk drive targets
2122 2122 * with checksumming (PM disabled)
2123 2123 */
2124 2124 sd_initpkt_for_buf, /* Index: 31 */
2125 2125 sd_initpkt_for_buf, /* Index: 32 */
2126 2126 sd_initpkt_for_buf, /* Index: 33 */
2127 2127 sd_initpkt_for_buf, /* Index: 34 */
2128 2128 };
2129 2129
2130 2130
2131 2131 /*
2132 2132 * Array to map a layering chain index to the appropriate destroypktpkt routine.
2133 2133 * The redundant entries are present so that the index used for accessing
2134 2134 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2135 2135 * with this table as well.
2136 2136 */
2137 2137 typedef void (*sd_destroypkt_t)(struct buf *);
2138 2138
2139 2139 static sd_destroypkt_t sd_destroypkt_map[] = {
2140 2140
2141 2141 /* Chain for buf IO for disk drive targets (PM enabled) */
2142 2142 sd_destroypkt_for_buf, /* Index: 0 */
2143 2143 sd_destroypkt_for_buf, /* Index: 1 */
2144 2144 sd_destroypkt_for_buf, /* Index: 2 */
2145 2145
2146 2146 /* Chain for buf IO for disk drive targets (PM disabled) */
2147 2147 sd_destroypkt_for_buf, /* Index: 3 */
2148 2148 sd_destroypkt_for_buf, /* Index: 4 */
2149 2149
2150 2150 /*
2151 2151 * Chain for buf IO for removable-media or large sector size
2152 2152 * disk drive targets (PM enabled)
2153 2153 */
2154 2154 sd_destroypkt_for_buf, /* Index: 5 */
2155 2155 sd_destroypkt_for_buf, /* Index: 6 */
2156 2156 sd_destroypkt_for_buf, /* Index: 7 */
2157 2157 sd_destroypkt_for_buf, /* Index: 8 */
2158 2158
2159 2159 /*
2160 2160 * Chain for buf IO for removable-media or large sector size
2161 2161 * disk drive targets (PM disabled)
2162 2162 */
2163 2163 sd_destroypkt_for_buf, /* Index: 9 */
2164 2164 sd_destroypkt_for_buf, /* Index: 10 */
2165 2165 sd_destroypkt_for_buf, /* Index: 11 */
2166 2166
2167 2167 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2168 2168 sd_destroypkt_for_buf, /* Index: 12 */
2169 2169 sd_destroypkt_for_buf, /* Index: 13 */
2170 2170 sd_destroypkt_for_buf, /* Index: 14 */
2171 2171 sd_destroypkt_for_buf, /* Index: 15 */
2172 2172
2173 2173 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2174 2174 sd_destroypkt_for_buf, /* Index: 16 */
2175 2175 sd_destroypkt_for_buf, /* Index: 17 */
2176 2176 sd_destroypkt_for_buf, /* Index: 18 */
2177 2177
2178 2178 /* Chain for USCSI commands (non-checksum targets) */
2179 2179 sd_destroypkt_for_uscsi, /* Index: 19 */
2180 2180 sd_destroypkt_for_uscsi, /* Index: 20 */
2181 2181
2182 2182 /* Chain for USCSI commands (checksum targets) */
2183 2183 sd_destroypkt_for_uscsi, /* Index: 21 */
2184 2184 sd_destroypkt_for_uscsi, /* Index: 22 */
2185 2185 sd_destroypkt_for_uscsi, /* Index: 23 */
2186 2186
2187 2187 /* Chain for "direct" USCSI commands (all targets) */
2188 2188 sd_destroypkt_for_uscsi, /* Index: 24 */
2189 2189
2190 2190 /* Chain for "direct priority" USCSI commands (all targets) */
2191 2191 sd_destroypkt_for_uscsi, /* Index: 25 */
2192 2192
2193 2193 /*
2194 2194 * Chain for buf IO for large sector size disk drive targets
2195 2195 * with checksumming (PM enabled)
2196 2196 */
2197 2197 sd_destroypkt_for_buf, /* Index: 26 */
2198 2198 sd_destroypkt_for_buf, /* Index: 27 */
2199 2199 sd_destroypkt_for_buf, /* Index: 28 */
2200 2200 sd_destroypkt_for_buf, /* Index: 29 */
2201 2201 sd_destroypkt_for_buf, /* Index: 30 */
2202 2202
2203 2203 /*
2204 2204 * Chain for buf IO for large sector size disk drive targets
2205 2205 * with checksumming (PM disabled)
2206 2206 */
2207 2207 sd_destroypkt_for_buf, /* Index: 31 */
2208 2208 sd_destroypkt_for_buf, /* Index: 32 */
2209 2209 sd_destroypkt_for_buf, /* Index: 33 */
2210 2210 sd_destroypkt_for_buf, /* Index: 34 */
2211 2211 };
2212 2212
2213 2213
2214 2214
/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define	SD_CHAIN_BUFIO			1	/* regular buf IO */
#define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
						/* (for error recovery) */

/*
 * NOTE: entries must remain in one-to-one index correspondence with
 * sd_iostart_chain[] and sd_iodone_chain[].
 */
static int sd_chain_type_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 0 */
	SD_CHAIN_BUFIO,			/* Index: 1 */
	SD_CHAIN_BUFIO,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 3 */
	SD_CHAIN_BUFIO,			/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM enabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 5 */
	SD_CHAIN_BUFIO,			/* Index: 6 */
	SD_CHAIN_BUFIO,			/* Index: 7 */
	SD_CHAIN_BUFIO,			/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM disabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 9 */
	SD_CHAIN_BUFIO,			/* Index: 10 */
	SD_CHAIN_BUFIO,			/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 12 */
	SD_CHAIN_BUFIO,			/* Index: 13 */
	SD_CHAIN_BUFIO,			/* Index: 14 */
	SD_CHAIN_BUFIO,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 16 */
	SD_CHAIN_BUFIO,			/* Index: 17 */
	SD_CHAIN_BUFIO,			/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 19 */
	SD_CHAIN_USCSI,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 21 */
	SD_CHAIN_USCSI,			/* Index: 22 */
	SD_CHAIN_USCSI,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	SD_CHAIN_DIRECT,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM enabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 26 */
	SD_CHAIN_BUFIO,			/* Index: 27 */
	SD_CHAIN_BUFIO,			/* Index: 28 */
	SD_CHAIN_BUFIO,			/* Index: 29 */
	SD_CHAIN_BUFIO,			/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM disabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 31 */
	SD_CHAIN_BUFIO,			/* Index: 32 */
	SD_CHAIN_BUFIO,			/* Index: 33 */
	SD_CHAIN_BUFIO,			/* Index: 34 */
};
2303 2303
2304 2304
/* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
#define	SD_IS_BUFIO(xp)			\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define	SD_IS_DIRECT_PRIORITY(xp)	\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2312 2312
2313 2313
2314 2314
/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chain for buf IO, uscsi IO, etc.. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains may be changed
 * dynamically & without the use of locking; and (2) a layer may update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
 */

struct sd_chain_index {
	int	sci_iostart_index;	/* head index into sd_iostart_chain[] */
	int	sci_iodone_index;	/* head index into sd_iodone_chain[] */
};
2335 2335
/*
 * Each row pairs the head of an iostart chain with the head of its
 * matching iodone chain; rows are selected by the SD_CHAIN_INFO_*
 * indexes defined below.
 */
static struct sd_chain_index sd_chain_index_map[] = {
	{ SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
	{ SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
	{ SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
	{ SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
	{ SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
	{ SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
	{ SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
	{ SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
	{ SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE },
	{ SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },

};
2351 2351
2352 2352
/*
 * The following are indexes into the sd_chain_index_map[] array.
 */

/* un->un_buf_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DISK		0
#define	SD_CHAIN_INFO_DISK_NO_PM	1
#define	SD_CHAIN_INFO_RMMEDIA		2
#define	SD_CHAIN_INFO_MSS_DISK		2	/* shares the RMMEDIA chains */
#define	SD_CHAIN_INFO_RMMEDIA_NO_PM	3
#define	SD_CHAIN_INFO_MSS_DSK_NO_PM	3	/* shares the RMMEDIA chains */
#define	SD_CHAIN_INFO_CHKSUM		4
#define	SD_CHAIN_INFO_CHKSUM_NO_PM	5
#define	SD_CHAIN_INFO_MSS_DISK_CHKSUM	10
#define	SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM	11

/* un->un_uscsi_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_USCSI_CMD		6
/* USCSI with PM disabled is the same as DIRECT */
#define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
#define	SD_CHAIN_INFO_USCSI_CHKSUM	7

/* un->un_direct_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DIRECT_CMD	8

/* un->un_priority_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_PRIORITY_CMD	9

/* size for devid inquiries */
#define	MAX_INQUIRY_SIZE		0xF0
2383 2383
/*
 * Macros used by functions to pass a given buf(9S) struct along to the
 * next function in the layering chain for further processing.
 *
 * In the following macros, passing more than three arguments to the called
 * routines causes the optimizer for the SPARC compiler to stop doing tail
 * call elimination which results in significant performance degradation.
 */
#define	SD_BEGIN_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[index]))(index, un, bp))

#define	SD_BEGIN_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[index]))(index, un, bp))

/* iostart chains advance toward higher indexes ... */
#define	SD_NEXT_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))

/* ... while iodone chains unwind toward lower indexes. */
#define	SD_NEXT_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
2403 2403
2404 2404 /*
2405 2405 * Function: _init
2406 2406 *
2407 2407 * Description: This is the driver _init(9E) entry point.
2408 2408 *
2409 2409 * Return Code: Returns the value from mod_install(9F) or
2410 2410 * ddi_soft_state_init(9F) as appropriate.
2411 2411 *
2412 2412 * Context: Called when driver module loaded.
2413 2413 */
2414 2414
2415 2415 int
2416 2416 _init(void)
2417 2417 {
2418 2418 int err;
2419 2419
2420 2420 /* establish driver name from module name */
2421 2421 sd_label = (char *)mod_modname(&modlinkage);
2422 2422
2423 2423 #ifndef XPV_HVM_DRIVER
2424 2424 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2425 2425 SD_MAXUNIT);
2426 2426 if (err != 0) {
2427 2427 return (err);
2428 2428 }
2429 2429
2430 2430 #else /* XPV_HVM_DRIVER */
2431 2431 /* Remove the leading "hvm_" from the module name */
2432 2432 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0);
2433 2433 sd_label += strlen("hvm_");
2434 2434
2435 2435 #endif /* XPV_HVM_DRIVER */
2436 2436
2437 2437 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2438 2438 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2439 2439 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2440 2440
2441 2441 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2442 2442 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2443 2443 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2444 2444
2445 2445 /*
2446 2446 * it's ok to init here even for fibre device
2447 2447 */
2448 2448 sd_scsi_probe_cache_init();
2449 2449
2450 2450 sd_scsi_target_lun_init();
2451 2451
2452 2452 /*
2453 2453 * Creating taskq before mod_install ensures that all callers (threads)
2454 2454 * that enter the module after a successful mod_install encounter
2455 2455 * a valid taskq.
2456 2456 */
2457 2457 sd_taskq_create();
2458 2458
2459 2459 err = mod_install(&modlinkage);
2460 2460 if (err != 0) {
2461 2461 /* delete taskq if install fails */
2462 2462 sd_taskq_delete();
2463 2463
2464 2464 mutex_destroy(&sd_detach_mutex);
2465 2465 mutex_destroy(&sd_log_mutex);
2466 2466 mutex_destroy(&sd_label_mutex);
2467 2467
2468 2468 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2469 2469 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2470 2470 cv_destroy(&sd_tr.srq_inprocess_cv);
2471 2471
2472 2472 sd_scsi_probe_cache_fini();
2473 2473
2474 2474 sd_scsi_target_lun_fini();
2475 2475
2476 2476 #ifndef XPV_HVM_DRIVER
2477 2477 ddi_soft_state_fini(&sd_state);
2478 2478 #endif /* !XPV_HVM_DRIVER */
2479 2479 return (err);
2480 2480 }
2481 2481
2482 2482 return (err);
2483 2483 }
2484 2484
2485 2485
2486 2486 /*
2487 2487 * Function: _fini
2488 2488 *
2489 2489 * Description: This is the driver _fini(9E) entry point.
2490 2490 *
2491 2491 * Return Code: Returns the value from mod_remove(9F)
2492 2492 *
2493 2493 * Context: Called when driver module is unloaded.
2494 2494 */
2495 2495
2496 2496 int
2497 2497 _fini(void)
2498 2498 {
2499 2499 int err;
2500 2500
2501 2501 if ((err = mod_remove(&modlinkage)) != 0) {
2502 2502 return (err);
2503 2503 }
2504 2504
2505 2505 sd_taskq_delete();
2506 2506
2507 2507 mutex_destroy(&sd_detach_mutex);
2508 2508 mutex_destroy(&sd_log_mutex);
2509 2509 mutex_destroy(&sd_label_mutex);
2510 2510 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2511 2511
2512 2512 sd_scsi_probe_cache_fini();
2513 2513
2514 2514 sd_scsi_target_lun_fini();
2515 2515
2516 2516 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2517 2517 cv_destroy(&sd_tr.srq_inprocess_cv);
2518 2518
2519 2519 #ifndef XPV_HVM_DRIVER
2520 2520 ddi_soft_state_fini(&sd_state);
2521 2521 #endif /* !XPV_HVM_DRIVER */
2522 2522
2523 2523 return (err);
2524 2524 }
2525 2525
2526 2526
2527 2527 /*
2528 2528 * Function: _info
2529 2529 *
2530 2530 * Description: This is the driver _info(9E) entry point.
2531 2531 *
2532 2532 * Arguments: modinfop - pointer to the driver modinfo structure
2533 2533 *
2534 2534 * Return Code: Returns the value from mod_info(9F).
2535 2535 *
2536 2536 * Context: Kernel thread context
2537 2537 */
2538 2538
2539 2539 int
2540 2540 _info(struct modinfo *modinfop)
2541 2541 {
2542 2542 return (mod_info(&modlinkage, modinfop));
2543 2543 }
2544 2544
2545 2545
2546 2546 /*
2547 2547 * The following routines implement the driver message logging facility.
2548 2548 * They provide component- and level- based debug output filtering.
2549 2549 * Output may also be restricted to messages for a single instance by
2550 2550 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2551 2551 * to NULL, then messages for all instances are printed.
2552 2552 *
2553 2553 * These routines have been cloned from each other due to the language
2554 2554 * constraints of macros and variable argument list processing.
2555 2555 */
2556 2556
2557 2557
/*
 * Function: sd_log_err
 *
 * Description: This routine is called by the SD_ERROR macro for debug
 *		logging of error conditions.
 *
 * Arguments: comp - driver component being logged
 *		un - pointer to the softstate of the target instance
 *		fmt - error string and format to be logged
 */

static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
	va_list ap;
	dev_info_t *dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		/*
		 * NOTE(review): vsprintf is unbounded; assumes sd_log_buf
		 * is large enough for any formatted message — confirm.
		 */
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & comp) {
		/* Also route a copy of the message to the injection log. */
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}
2605 2605
2606 2606
/*
 * Function: sd_log_info
 *
 * Description: This routine is called by the SD_INFO macro for debug
 *		logging of general purpose informational conditions.
 *
 * Arguments: component - driver component being logged
 *		un - pointer to the softstate of the target instance
 *		fmt - info string and format to be logged
 */

static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list ap;
	dev_info_t *dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_INFO) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		/*
		 * NOTE(review): vsprintf is unbounded; assumes sd_log_buf
		 * is large enough for any formatted message — confirm.
		 */
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		/* Also route a copy of the message to the injection log. */
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}
2655 2655
2656 2656
/*
 * Function: sd_log_trace
 *
 * Description: This routine is called by the SD_TRACE macro for debug
 *		logging of trace conditions (i.e. function entry/exit).
 *
 * Arguments: component - driver component being logged
 *		un - pointer to the softstate of the target instance
 *		fmt - trace string and format to be logged
 */

static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list ap;
	dev_info_t *dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_TRACE) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		/*
		 * NOTE(review): vsprintf is unbounded; assumes sd_log_buf
		 * is large enough for any formatted message — confirm.
		 */
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		/* Also route a copy of the message to the injection log. */
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}
2705 2705
2706 2706
2707 2707 /*
2708 2708 * Function: sdprobe
2709 2709 *
2710 2710 * Description: This is the driver probe(9e) entry point function.
2711 2711 *
2712 2712 * Arguments: devi - opaque device info handle
2713 2713 *
2714 2714 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2715 2715 * DDI_PROBE_FAILURE: If the probe failed.
2716 2716 * DDI_PROBE_PARTIAL: If the instance is not present now,
2717 2717 * but may be present in the future.
2718 2718 */
2719 2719
2720 2720 static int
2721 2721 sdprobe(dev_info_t *devi)
2722 2722 {
2723 2723 struct scsi_device *devp;
2724 2724 int rval;
2725 2725 #ifndef XPV_HVM_DRIVER
2726 2726 int instance = ddi_get_instance(devi);
2727 2727 #endif /* !XPV_HVM_DRIVER */
2728 2728
2729 2729 /*
2730 2730 * if it wasn't for pln, sdprobe could actually be nulldev
2731 2731 * in the "__fibre" case.
2732 2732 */
2733 2733 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2734 2734 return (DDI_PROBE_DONTCARE);
2735 2735 }
2736 2736
2737 2737 devp = ddi_get_driver_private(devi);
2738 2738
2739 2739 if (devp == NULL) {
2740 2740 /* Ooops... nexus driver is mis-configured... */
2741 2741 return (DDI_PROBE_FAILURE);
2742 2742 }
2743 2743
2744 2744 #ifndef XPV_HVM_DRIVER
2745 2745 if (ddi_get_soft_state(sd_state, instance) != NULL) {
2746 2746 return (DDI_PROBE_PARTIAL);
2747 2747 }
2748 2748 #endif /* !XPV_HVM_DRIVER */
2749 2749
2750 2750 /*
2751 2751 * Call the SCSA utility probe routine to see if we actually
2752 2752 * have a target at this SCSI nexus.
2753 2753 */
2754 2754 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2755 2755 case SCSIPROBE_EXISTS:
2756 2756 switch (devp->sd_inq->inq_dtype) {
2757 2757 case DTYPE_DIRECT:
2758 2758 rval = DDI_PROBE_SUCCESS;
2759 2759 break;
2760 2760 case DTYPE_RODIRECT:
2761 2761 /* CDs etc. Can be removable media */
2762 2762 rval = DDI_PROBE_SUCCESS;
2763 2763 break;
2764 2764 case DTYPE_OPTICAL:
2765 2765 /*
2766 2766 * Rewritable optical driver HP115AA
2767 2767 * Can also be removable media
2768 2768 */
2769 2769
2770 2770 /*
2771 2771 * Do not attempt to bind to DTYPE_OPTICAL if
2772 2772 * pre solaris 9 sparc sd behavior is required
2773 2773 *
2774 2774 * If first time through and sd_dtype_optical_bind
2775 2775 * has not been set in /etc/system check properties
2776 2776 */
2777 2777
2778 2778 if (sd_dtype_optical_bind < 0) {
2779 2779 sd_dtype_optical_bind = ddi_prop_get_int
2780 2780 (DDI_DEV_T_ANY, devi, 0,
2781 2781 "optical-device-bind", 1);
2782 2782 }
2783 2783
2784 2784 if (sd_dtype_optical_bind == 0) {
2785 2785 rval = DDI_PROBE_FAILURE;
2786 2786 } else {
2787 2787 rval = DDI_PROBE_SUCCESS;
2788 2788 }
2789 2789 break;
2790 2790
2791 2791 case DTYPE_NOTPRESENT:
2792 2792 default:
2793 2793 rval = DDI_PROBE_FAILURE;
2794 2794 break;
2795 2795 }
2796 2796 break;
2797 2797 default:
2798 2798 rval = DDI_PROBE_PARTIAL;
2799 2799 break;
2800 2800 }
2801 2801
2802 2802 /*
2803 2803 * This routine checks for resource allocation prior to freeing,
2804 2804 * so it will take care of the "smart probing" case where a
2805 2805 * scsi_probe() may or may not have been issued and will *not*
2806 2806 * free previously-freed resources.
2807 2807 */
2808 2808 scsi_unprobe(devp);
2809 2809 return (rval);
2810 2810 }
2811 2811
2812 2812
2813 2813 /*
2814 2814 * Function: sdinfo
2815 2815 *
2816 2816 * Description: This is the driver getinfo(9e) entry point function.
2817 2817 * Given the device number, return the devinfo pointer from
2818 2818 * the scsi_device structure or the instance number
2819 2819 * associated with the dev_t.
2820 2820 *
2821 2821 * Arguments: dip - pointer to device info structure
2822 2822 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2823 2823 * DDI_INFO_DEVT2INSTANCE)
2824 2824 * arg - driver dev_t
2825 2825 * resultp - user buffer for request response
2826 2826 *
2827 2827 * Return Code: DDI_SUCCESS
2828 2828 * DDI_FAILURE
2829 2829 */
2830 2830 /* ARGSUSED */
2831 2831 static int
2832 2832 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2833 2833 {
2834 2834 struct sd_lun *un;
2835 2835 dev_t dev;
2836 2836 int instance;
2837 2837 int error;
2838 2838
2839 2839 switch (infocmd) {
2840 2840 case DDI_INFO_DEVT2DEVINFO:
2841 2841 dev = (dev_t)arg;
2842 2842 instance = SDUNIT(dev);
2843 2843 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
2844 2844 return (DDI_FAILURE);
2845 2845 }
2846 2846 *result = (void *) SD_DEVINFO(un);
2847 2847 error = DDI_SUCCESS;
2848 2848 break;
2849 2849 case DDI_INFO_DEVT2INSTANCE:
2850 2850 dev = (dev_t)arg;
2851 2851 instance = SDUNIT(dev);
2852 2852 *result = (void *)(uintptr_t)instance;
2853 2853 error = DDI_SUCCESS;
2854 2854 break;
2855 2855 default:
2856 2856 error = DDI_FAILURE;
2857 2857 }
2858 2858 return (error);
2859 2859 }
2860 2860
2861 2861 /*
2862 2862 * Function: sd_prop_op
2863 2863 *
2864 2864 * Description: This is the driver prop_op(9e) entry point function.
2865 2865 * Return the number of blocks for the partition in question
2866 2866 * or forward the request to the property facilities.
2867 2867 *
2868 2868 * Arguments: dev - device number
2869 2869 * dip - pointer to device info structure
2870 2870 * prop_op - property operator
2871 2871 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent
2872 2872 * name - pointer to property name
2873 2873 * valuep - pointer or address of the user buffer
2874 2874 * lengthp - property length
2875 2875 *
2876 2876 * Return Code: DDI_PROP_SUCCESS
2877 2877 * DDI_PROP_NOT_FOUND
2878 2878 * DDI_PROP_UNDEFINED
2879 2879 * DDI_PROP_NO_MEMORY
2880 2880 * DDI_PROP_BUF_TOO_SMALL
2881 2881 */
2882 2882
2883 2883 static int
2884 2884 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
2885 2885 char *name, caddr_t valuep, int *lengthp)
2886 2886 {
2887 2887 struct sd_lun *un;
2888 2888
2889 2889 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
2890 2890 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
2891 2891 name, valuep, lengthp));
2892 2892
2893 2893 return (cmlb_prop_op(un->un_cmlbhandle,
2894 2894 dev, dip, prop_op, mod_flags, name, valuep, lengthp,
2895 2895 SDPART(dev), (void *)SD_PATH_DIRECT));
2896 2896 }
2897 2897
2898 2898 /*
2899 2899 * The following functions are for smart probing:
2900 2900 * sd_scsi_probe_cache_init()
2901 2901 * sd_scsi_probe_cache_fini()
2902 2902 * sd_scsi_clear_probe_cache()
2903 2903 * sd_scsi_probe_with_cache()
2904 2904 */
2905 2905
2906 2906 /*
2907 2907 * Function: sd_scsi_probe_cache_init
2908 2908 *
2909 2909 * Description: Initializes the probe response cache mutex and head pointer.
2910 2910 *
2911 2911 * Context: Kernel thread context
2912 2912 */
2913 2913
2914 2914 static void
2915 2915 sd_scsi_probe_cache_init(void)
2916 2916 {
2917 2917 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
2918 2918 sd_scsi_probe_cache_head = NULL;
2919 2919 }
2920 2920
2921 2921
2922 2922 /*
2923 2923 * Function: sd_scsi_probe_cache_fini
2924 2924 *
2925 2925 * Description: Frees all resources associated with the probe response cache.
2926 2926 *
2927 2927 * Context: Kernel thread context
2928 2928 */
2929 2929
2930 2930 static void
2931 2931 sd_scsi_probe_cache_fini(void)
2932 2932 {
2933 2933 struct sd_scsi_probe_cache *cp;
2934 2934 struct sd_scsi_probe_cache *ncp;
2935 2935
2936 2936 /* Clean up our smart probing linked list */
2937 2937 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2938 2938 ncp = cp->next;
2939 2939 kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2940 2940 }
2941 2941 sd_scsi_probe_cache_head = NULL;
2942 2942 mutex_destroy(&sd_scsi_probe_cache_mutex);
2943 2943 }
2944 2944
2945 2945
2946 2946 /*
2947 2947 * Function: sd_scsi_clear_probe_cache
2948 2948 *
2949 2949 * Description: This routine clears the probe response cache. This is
2950 2950 * done when open() returns ENXIO so that when deferred
2951 2951 * attach is attempted (possibly after a device has been
2952 2952 * turned on) we will retry the probe. Since we don't know
2953 2953 * which target we failed to open, we just clear the
2954 2954 * entire cache.
2955 2955 *
2956 2956 * Context: Kernel thread context
2957 2957 */
2958 2958
2959 2959 static void
2960 2960 sd_scsi_clear_probe_cache(void)
2961 2961 {
2962 2962 struct sd_scsi_probe_cache *cp;
2963 2963 int i;
2964 2964
2965 2965 mutex_enter(&sd_scsi_probe_cache_mutex);
2966 2966 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2967 2967 /*
2968 2968 * Reset all entries to SCSIPROBE_EXISTS. This will
2969 2969 * force probing to be performed the next time
2970 2970 * sd_scsi_probe_with_cache is called.
2971 2971 */
2972 2972 for (i = 0; i < NTARGETS_WIDE; i++) {
2973 2973 cp->cache[i] = SCSIPROBE_EXISTS;
2974 2974 }
2975 2975 }
2976 2976 mutex_exit(&sd_scsi_probe_cache_mutex);
2977 2977 }
2978 2978
2979 2979
/*
 * Function: sd_scsi_probe_with_cache
 *
 * Description: This routine implements support for a scsi device probe
 *		with cache. The driver maintains a cache of the target
 *		responses to scsi probes. If we get no response from a
 *		target during a probe inquiry, we remember that, and we
 *		avoid additional calls to scsi_probe on non-zero LUNs
 *		on the same target until the cache is cleared. By doing
 *		so we avoid the 1/4 sec selection timeout for nonzero
 *		LUNs. lun0 of a target is always probed.
 *
 * Arguments: devp - Pointer to a scsi_device(9S) structure
 *		waitfunc - indicates what the allocator routines should
 *		do when resources are not available. This value
 *		is passed on to scsi_probe() when that routine
 *		is called.
 *
 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache;
 *		otherwise the value returned by scsi_probe(9F).
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
	struct sd_scsi_probe_cache *cp;
	dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
	int lun, tgt;

	lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/* Make sure caching enabled and target in range */
	if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
		/* do it the old way (no cache) */
		return (scsi_probe(devp, waitfn));
	}

	mutex_enter(&sd_scsi_probe_cache_mutex);

	/* Find the cache for this scsi bus instance */
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			break;
		}
	}

	/* If we can't find a cache for this pdip, create one */
	if (cp == NULL) {
		int i;

		cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
		    KM_SLEEP);
		cp->pdip = pdip;
		cp->next = sd_scsi_probe_cache_head;
		sd_scsi_probe_cache_head = cp;
		for (i = 0; i < NTARGETS_WIDE; i++) {
			cp->cache[i] = SCSIPROBE_EXISTS;
		}
	}

	mutex_exit(&sd_scsi_probe_cache_mutex);

	/*
	 * NOTE(review): cp->cache[tgt] is read and written below after
	 * sd_scsi_probe_cache_mutex has been dropped; presumably concurrent
	 * updates of a single cache slot are considered benign here —
	 * confirm before restructuring the locking.
	 */

	/* Recompute the cache for this target if LUN zero */
	if (lun == 0) {
		cp->cache[tgt] = SCSIPROBE_EXISTS;
	}

	/* Don't probe if cache remembers a NORESP from a previous LUN. */
	if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
		return (SCSIPROBE_NORESP);
	}

	/* Do the actual probe; save & return the result */
	return (cp->cache[tgt] = scsi_probe(devp, waitfn));
}
3060 3060
3061 3061
3062 3062 /*
3063 3063 * Function: sd_scsi_target_lun_init
3064 3064 *
3065 3065 * Description: Initializes the attached lun chain mutex and head pointer.
3066 3066 *
3067 3067 * Context: Kernel thread context
3068 3068 */
3069 3069
3070 3070 static void
3071 3071 sd_scsi_target_lun_init(void)
3072 3072 {
3073 3073 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
3074 3074 sd_scsi_target_lun_head = NULL;
3075 3075 }
3076 3076
3077 3077
3078 3078 /*
3079 3079 * Function: sd_scsi_target_lun_fini
3080 3080 *
3081 3081 * Description: Frees all resources associated with the attached lun
3082 3082 * chain
3083 3083 *
3084 3084 * Context: Kernel thread context
3085 3085 */
3086 3086
static void
sd_scsi_target_lun_fini(void)
{
	struct sd_scsi_hba_tgt_lun	*cp;
	struct sd_scsi_hba_tgt_lun	*ncp;

	/*
	 * Walk the chain, saving each entry's next pointer before freeing
	 * the entry itself. No lock is taken here — presumably this runs
	 * only at module teardown when no other thread can reach the
	 * chain (NOTE(review): confirm against _fini path).
	 */
	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
		ncp = cp->next;
		kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
	}
	sd_scsi_target_lun_head = NULL;
	mutex_destroy(&sd_scsi_target_lun_mutex);
}
3100 3100
3101 3101
3102 3102 /*
3103 3103 * Function: sd_scsi_get_target_lun_count
3104 3104 *
3105 3105 * Description: This routine will check in the attached lun chain to see
3106 3106 * how many luns are attached on the required SCSI controller
3107 3107 * and target. Currently, some capabilities like tagged queue
3108 3108 * are supported per target based by HBA. So all luns in a
3109 3109 * target have the same capabilities. Based on this assumption,
3110 3110 * sd should only set these capabilities once per target. This
3111 3111 * function is called when sd needs to decide how many luns
3112 3112 * already attached on a target.
3113 3113 *
3114 3114 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3115 3115 * controller device.
3116 3116 * target - The target ID on the controller's SCSI bus.
3117 3117 *
3118 3118 * Return Code: The number of luns attached on the required target and
3119 3119 * controller.
3120 3120 * -1 if target ID is not in parallel SCSI scope or the given
3121 3121 * dip is not in the chain.
3122 3122 *
3123 3123 * Context: Kernel thread context
3124 3124 */
3125 3125
3126 3126 static int
3127 3127 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
3128 3128 {
3129 3129 struct sd_scsi_hba_tgt_lun *cp;
3130 3130
3131 3131 if ((target < 0) || (target >= NTARGETS_WIDE)) {
3132 3132 return (-1);
3133 3133 }
3134 3134
3135 3135 mutex_enter(&sd_scsi_target_lun_mutex);
3136 3136
3137 3137 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3138 3138 if (cp->pdip == dip) {
3139 3139 break;
3140 3140 }
3141 3141 }
3142 3142
3143 3143 mutex_exit(&sd_scsi_target_lun_mutex);
3144 3144
3145 3145 if (cp == NULL) {
3146 3146 return (-1);
3147 3147 }
3148 3148
3149 3149 return (cp->nlun[target]);
3150 3150 }
3151 3151
3152 3152
3153 3153 /*
3154 3154 * Function: sd_scsi_update_lun_on_target
3155 3155 *
3156 3156 * Description: This routine is used to update the attached lun chain when a
3157 3157 * lun is attached or detached on a target.
3158 3158 *
3159 3159 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3160 3160 * controller device.
3161 3161 * target - The target ID on the controller's SCSI bus.
3162 3162 * flag - Indicate the lun is attached or detached.
3163 3163 *
3164 3164 * Context: Kernel thread context
3165 3165 */
3166 3166
3167 3167 static void
3168 3168 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
3169 3169 {
3170 3170 struct sd_scsi_hba_tgt_lun *cp;
3171 3171
3172 3172 mutex_enter(&sd_scsi_target_lun_mutex);
3173 3173
3174 3174 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3175 3175 if (cp->pdip == dip) {
3176 3176 break;
3177 3177 }
3178 3178 }
3179 3179
3180 3180 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
3181 3181 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
3182 3182 KM_SLEEP);
3183 3183 cp->pdip = dip;
3184 3184 cp->next = sd_scsi_target_lun_head;
3185 3185 sd_scsi_target_lun_head = cp;
3186 3186 }
3187 3187
3188 3188 mutex_exit(&sd_scsi_target_lun_mutex);
3189 3189
3190 3190 if (cp != NULL) {
3191 3191 if (flag == SD_SCSI_LUN_ATTACH) {
3192 3192 cp->nlun[target] ++;
3193 3193 } else {
3194 3194 cp->nlun[target] --;
3195 3195 }
3196 3196 }
3197 3197 }
3198 3198
3199 3199
3200 3200 /*
3201 3201 * Function: sd_spin_up_unit
3202 3202 *
3203 3203 * Description: Issues the following commands to spin-up the device:
3204 3204 * START STOP UNIT, and INQUIRY.
3205 3205 *
3206 3206 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3207 3207 * structure for this target.
3208 3208 *
3209 3209 * Return Code: 0 - success
3210 3210 * EIO - failure
3211 3211 * EACCES - reservation conflict
3212 3212 *
3213 3213 * Context: Kernel thread context
3214 3214 */
3215 3215
3216 3216 static int
3217 3217 sd_spin_up_unit(sd_ssc_t *ssc)
3218 3218 {
3219 3219 size_t resid = 0;
3220 3220 int has_conflict = FALSE;
3221 3221 uchar_t *bufaddr;
3222 3222 int status;
3223 3223 struct sd_lun *un;
3224 3224
3225 3225 ASSERT(ssc != NULL);
3226 3226 un = ssc->ssc_un;
3227 3227 ASSERT(un != NULL);
3228 3228
3229 3229 /*
3230 3230 * Send a throwaway START UNIT command.
3231 3231 *
3232 3232 * If we fail on this, we don't care presently what precisely
3233 3233 * is wrong. EMC's arrays will also fail this with a check
3234 3234 * condition (0x2/0x4/0x3) if the device is "inactive," but
3235 3235 * we don't want to fail the attach because it may become
3236 3236 * "active" later.
3237 3237 * We don't know if power condition is supported or not at
3238 3238 * this stage, use START STOP bit.
3239 3239 */
3240 3240 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
3241 3241 SD_TARGET_START, SD_PATH_DIRECT);
3242 3242
3243 3243 if (status != 0) {
3244 3244 if (status == EACCES)
3245 3245 has_conflict = TRUE;
3246 3246 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3247 3247 }
3248 3248
3249 3249 /*
3250 3250 * Send another INQUIRY command to the target. This is necessary for
3251 3251 * non-removable media direct access devices because their INQUIRY data
3252 3252 * may not be fully qualified until they are spun up (perhaps via the
3253 3253 * START command above). Note: This seems to be needed for some
3254 3254 * legacy devices only.) The INQUIRY command should succeed even if a
3255 3255 * Reservation Conflict is present.
3256 3256 */
3257 3257 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3258 3258
3259 3259 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3260 3260 != 0) {
3261 3261 kmem_free(bufaddr, SUN_INQSIZE);
3262 3262 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3263 3263 return (EIO);
3264 3264 }
3265 3265
3266 3266 /*
3267 3267 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3268 3268 * Note that this routine does not return a failure here even if the
3269 3269 * INQUIRY command did not return any data. This is a legacy behavior.
3270 3270 */
3271 3271 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3272 3272 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3273 3273 }
3274 3274
3275 3275 kmem_free(bufaddr, SUN_INQSIZE);
3276 3276
3277 3277 /* If we hit a reservation conflict above, tell the caller. */
3278 3278 if (has_conflict == TRUE) {
3279 3279 return (EACCES);
3280 3280 }
3281 3281
3282 3282 return (0);
3283 3283 }
3284 3284
3285 3285 #ifdef _LP64
3286 3286 /*
3287 3287 * Function: sd_enable_descr_sense
3288 3288 *
3289 3289 * Description: This routine attempts to select descriptor sense format
3290 3290 * using the Control mode page. Devices that support 64 bit
3291 3291 * LBAs (for >2TB luns) should also implement descriptor
3292 3292 * sense data so we will call this function whenever we see
3293 3293 * a lun larger than 2TB. If for some reason the device
3294 3294 * supports 64 bit LBAs but doesn't support descriptor sense
3295 3295 * presumably the mode select will fail. Everything will
3296 3296 * continue to work normally except that we will not get
3297 3297 * complete sense data for commands that fail with an LBA
3298 3298 * larger than 32 bits.
3299 3299 *
3300 3300 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3301 3301 * structure for this target.
3302 3302 *
3303 3303 * Context: Kernel thread context only
3304 3304 */
3305 3305
static void
sd_enable_descr_sense(sd_ssc_t *ssc)
{
	uchar_t			*header;
	struct mode_control_scsi3 *ctrl_bufp;
	size_t			buflen;
	size_t			bd_len;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Read MODE SENSE page 0xA, Control Mode Page
	 */
	buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
	    sizeof (struct mode_control_scsi3);
	header = kmem_zalloc(buflen, KM_SLEEP);

	status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
	    MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);

	if (status != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_enable_descr_sense: mode sense ctrl page failed\n");
		goto eds_exit;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	bd_len = ((struct mode_header *)header)->bdesc_length;

	/* Clear the mode data length field for MODE SELECT */
	((struct mode_header *)header)->length = 0;

	/* The control mode page follows the header and block descriptors. */
	ctrl_bufp = (struct mode_control_scsi3 *)
	    (header + MODE_HEADER_LENGTH + bd_len);

	/*
	 * If the page length is smaller than the expected value,
	 * the target device doesn't support D_SENSE. Bail out here.
	 */
	if (ctrl_bufp->mode_page.length <
	    sizeof (struct mode_control_scsi3) - 2) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_enable_descr_sense: enable D_SENSE failed\n");
		goto eds_exit;
	}

	/*
	 * Clear PS bit for MODE SELECT
	 */
	ctrl_bufp->mode_page.ps = 0;

	/*
	 * Set D_SENSE to enable descriptor sense format.
	 */
	ctrl_bufp->d_sense = 1;

	/*
	 * Issue an IGNORE assessment for the preceding mode sense before
	 * sending the mode select (presumably to reset the per-command
	 * assessment state — NOTE(review): confirm sd_ssc semantics).
	 */
	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	/*
	 * Use MODE SELECT to commit the change to the D_SENSE bit
	 */
	status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
	    buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT);

	if (status != 0) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_enable_descr_sense: mode select ctrl page failed\n");
	} else {
		/* Success: free the buffer and skip the failure exit. */
		kmem_free(header, buflen);
		return;
	}

	/* Common failure exit: IGNORE assessment, then free the buffer. */
eds_exit:
	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	kmem_free(header, buflen);
}
3390 3390
3391 3391 /*
3392 3392 * Function: sd_reenable_dsense_task
3393 3393 *
3394 3394 * Description: Re-enable descriptor sense after device or bus reset
3395 3395 *
3396 3396 * Context: Executes in a taskq() thread context
3397 3397 */
static void
sd_reenable_dsense_task(void *arg)
{
	struct sd_lun	*un = arg;
	sd_ssc_t	*ssc;

	ASSERT(un != NULL);

	/*
	 * Build a temporary ssc around the unit, re-enable descriptor
	 * sense, and tear the ssc down again.
	 */
	ssc = sd_ssc_init(un);
	sd_enable_descr_sense(ssc);
	sd_ssc_fini(ssc);
}
3410 3410 #endif /* _LP64 */
3411 3411
3412 3412 /*
3413 3413 * Function: sd_set_mmc_caps
3414 3414 *
3415 3415 * Description: This routine determines if the device is MMC compliant and if
3416 3416 * the device supports CDDA via a mode sense of the CDVD
3417 3417 * capabilities mode page. Also checks if the device is a
3418 3418 * dvdram writable device.
3419 3419 *
3420 3420 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3421 3421 * structure for this target.
3422 3422 *
3423 3423 * Context: Kernel thread context only
3424 3424 */
3425 3425
static void
sd_set_mmc_caps(sd_ssc_t *ssc)
{
	struct mode_header_grp2 *sense_mhp;
	uchar_t			*sense_page;
	caddr_t			buf;
	int			bd_len;
	int			status;
	struct uscsi_cmd	com;
	int			rtn;
	uchar_t			*out_data_rw, *out_data_hd;
	uchar_t			*rqbuf_rw, *rqbuf_hd;
	uchar_t			*out_data_gesn;
	int			gesn_len;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * The flags which will be set in this function are - mmc compliant,
	 * dvdram writable device, cdda support. Initialize them to FALSE
	 * and if a capability is detected - it will be set to TRUE.
	 */
	un->un_f_mmc_cap = FALSE;
	un->un_f_dvdram_writable_device = FALSE;
	un->un_f_cfg_cdda = FALSE;

	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);

	/* A failed capabilities probe is expected on non-MMC devices. */
	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	if (status != 0) {
		/* command failed; just return */
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	/*
	 * If the mode sense request for the CDROM CAPABILITIES
	 * page (0x2A) succeeds the device is assumed to be MMC.
	 */
	un->un_f_mmc_cap = TRUE;

	/* See if GET STATUS EVENT NOTIFICATION is supported */
	if (un->un_f_mmc_gesn_polling) {
		gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN;
		out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP);

		rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc,
		    out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS);

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		/* Best-effort probe: disable GESN polling if unsupported. */
		if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) {
			un->un_f_mmc_gesn_polling = FALSE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_mmc_caps: gesn not supported "
			    "%d %x %x %x %x\n", rtn,
			    out_data_gesn[0], out_data_gesn[1],
			    out_data_gesn[2], out_data_gesn[3]);
		}

		kmem_free(out_data_gesn, gesn_len);
	}

	/* Get to the page data */
	sense_mhp = (struct mode_header_grp2 *)buf;
	bd_len = (sense_mhp->bdesc_length_hi << 8) |
	    sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/*
		 * We did not get back the expected block descriptor
		 * length so we cannot determine if the device supports
		 * CDDA. However, we still indicate the device is MMC
		 * according to the successful response to the page
		 * 0x2A mode sense request.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_set_mmc_caps: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* See if read CDDA is supported */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
	    bd_len);
	un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;

	/* See if writing DVD RAM is supported. */
	un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
	if (un->un_f_dvdram_writable_device == TRUE) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since a RRD will not have
	 * these capabilities.
	 */
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_dvdram_writable_device is still FALSE,
	 * check for a Removable Rigid Disk (RRD). A RRD
	 * device is identified by the features RANDOM_WRITABLE and
	 * HARDWARE_DEFECT_MANAGEMENT.
	 */
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE, SD_PATH_STANDARD);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
			un->un_f_dvdram_writable_device = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}
3582 3582
3583 3583 /*
3584 3584 * Function: sd_check_for_writable_cd
3585 3585 *
3586 3586 * Description: This routine determines if the media in the device is
3587 3587 * writable or not. It uses the get configuration command (0x46)
3588 3588 * to determine if the media is writable
3589 3589 *
3590 3590 * Arguments: un - driver soft state (unit) structure
3591 3591 * path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3592 3592 * chain and the normal command waitq, or
3593 3593 * SD_PATH_DIRECT_PRIORITY to use the USCSI
3594 3594 * "direct" chain and bypass the normal command
3595 3595 * waitq.
3596 3596 *
3597 3597 * Context: Never called at interrupt context.
3598 3598 */
3599 3599
static void
sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
{
	struct uscsi_cmd	com;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rtn;
	uchar_t			*out_data_rw, *out_data_hd;
	uchar_t			*rqbuf_rw, *rqbuf_hd;
	struct mode_header_grp2 *sense_mhp;
	uchar_t			*sense_page;
	caddr_t			buf;
	int			bd_len;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	/*
	 * Caller must hold SD_MUTEX. The mutex is dropped around each
	 * blocking command below and re-acquired before every return, so
	 * the lock is held again on exit from this function.
	 */
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize the writable media to false, if configuration info.
	 * tells us otherwise then only we will set it.
	 */
	un->un_f_mmc_writable_media = FALSE;
	mutex_exit(SD_MUTEX(un));

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
	    out_data, SD_PROFILE_HEADER_LEN, path_flag);

	if (rtn != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for writable DVD.
		 */
		if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
			un->un_f_mmc_writable_media = TRUE;
			kmem_free(out_data, SD_PROFILE_HEADER_LEN);
			kmem_free(rqbuf, SENSE_LENGTH);
			return;
		}
	}

	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);

	/*
	 * Determine if this is a RRD type device.
	 */
	/* Drop the mutex again across the blocking mode sense. */
	mutex_exit(SD_MUTEX(un));
	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));
	if (status != 0) {
		/* command failed; just return */
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* Get to the page data */
	sense_mhp = (struct mode_header_grp2 *)buf;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/*
		 * We did not get back the expected block descriptor length so
		 * we cannot check the mode page.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_check_for_writable_cd: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since a RRD device will not have
	 * these capabilities.
	 */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_mmc_writable_media is still FALSE,
	 * check for RRD type media. A RRD device is identified
	 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
	 */
	mutex_exit(SD_MUTEX(un));
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE, path_flag);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		/* Re-acquire the mutex before returning to the caller. */
		mutex_enter(SD_MUTEX(un));
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT, path_flag);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features as current.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_rw[10] & 0x1) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
		    (out_data_hd[10] & 0x1)) {
			un->un_f_mmc_writable_media = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}
3745 3745
3746 3746 /*
3747 3747 * Function: sd_read_unit_properties
3748 3748 *
3749 3749 * Description: The following implements a property lookup mechanism.
3750 3750 * Properties for particular disks (keyed on vendor, model
3751 3751 * and rev numbers) are sought in the sd.conf file via
3752 3752 * sd_process_sdconf_file(), and if not found there, are
3753 3753 * looked for in a list hardcoded in this driver via
3754 3754 * sd_process_sdconf_table() Once located the properties
3755 3755 * are used to update the driver unit structure.
3756 3756 *
3757 3757 * Arguments: un - driver soft state (unit) structure
3758 3758 */
3759 3759
3760 3760 static void
3761 3761 sd_read_unit_properties(struct sd_lun *un)
3762 3762 {
3763 3763 /*
3764 3764 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3765 3765 * the "sd-config-list" property (from the sd.conf file) or if
3766 3766 * there was not a match for the inquiry vid/pid. If this event
3767 3767 * occurs the static driver configuration table is searched for
3768 3768 * a match.
3769 3769 */
3770 3770 ASSERT(un != NULL);
3771 3771 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3772 3772 sd_process_sdconf_table(un);
3773 3773 }
3774 3774
3775 3775 /* check for LSI device */
3776 3776 sd_is_lsi(un);
3777 3777
3778 3778
3779 3779 }
3780 3780
3781 3781
3782 3782 /*
3783 3783 * Function: sd_process_sdconf_file
3784 3784 *
3785 3785 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3786 3786 * driver's config file (ie, sd.conf) and update the driver
3787 3787 * soft state structure accordingly.
3788 3788 *
3789 3789 * Arguments: un - driver soft state (unit) structure
3790 3790 *
3791 3791 * Return Code: SD_SUCCESS - The properties were successfully set according
3792 3792 * to the driver configuration file.
3793 3793 * SD_FAILURE - The driver config list was not obtained or
3794 3794 * there was no vid/pid match. This indicates that
3795 3795 * the static config table should be used.
3796 3796 *
3797 3797 * The config file has a property, "sd-config-list". Currently we support
3798 3798 * two kinds of formats. For both formats, the value of this property
3799 3799 * is a list of duplets:
3800 3800 *
3801 3801 * sd-config-list=
3802 3802 * <duplet>,
3803 3803 * [,<duplet>]*;
3804 3804 *
3805 3805 * For the improved format, where
3806 3806 *
3807 3807 * <duplet>:= "<vid+pid>","<tunable-list>"
3808 3808 *
3809 3809 * and
3810 3810 *
3811 3811 * <tunable-list>:= <tunable> [, <tunable> ]*;
3812 3812 * <tunable> = <name> : <value>
3813 3813 *
3814 3814 * The <vid+pid> is the string that is returned by the target device on a
3815 3815 * SCSI inquiry command, the <tunable-list> contains one or more tunables
3816 3816 * to apply to all target devices with the specified <vid+pid>.
3817 3817 *
3818 3818 * Each <tunable> is a "<name> : <value>" pair.
3819 3819 *
3820 3820 * For the old format, the structure of each duplet is as follows:
3821 3821 *
3822 3822 * <duplet>:= "<vid+pid>","<data-property-name_list>"
3823 3823 *
3824 3824 * The first entry of the duplet is the device ID string (the concatenated
3825 3825 * vid & pid; not to be confused with a device_id). This is defined in
3826 3826 * the same way as in the sd_disk_table.
3827 3827 *
3828 3828 * The second part of the duplet is a string that identifies a
3829 3829 * data-property-name-list. The data-property-name-list is defined as
3830 3830 * follows:
3831 3831 *
3832 3832 * <data-property-name-list>:=<data-property-name> [<data-property-name>]
3833 3833 *
3834 3834 * The syntax of <data-property-name> depends on the <version> field.
3835 3835 *
3836 3836 * If version = SD_CONF_VERSION_1 we have the following syntax:
3837 3837 *
3838 3838 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3839 3839 *
3840 3840 * where the prop0 value will be used to set prop0 if bit0 set in the
3841 3841 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1
3842 3842 *
3843 3843 */
3844 3844
static int
sd_process_sdconf_file(struct sd_lun *un)
{
	char	**config_list = NULL;
	uint_t	nelements;
	char	*vidptr;
	int	vidlen;
	char	*dnlist_ptr;
	char	*dataname_ptr;
	char	*dataname_lasts;
	int	*data_list = NULL;
	uint_t	data_list_len;
	int	rval = SD_FAILURE;
	int	i;

	ASSERT(un != NULL);

	/* Obtain the configuration list associated with the .conf file */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
	    &config_list, &nelements) != DDI_PROP_SUCCESS) {
		return (SD_FAILURE);
	}

	/*
	 * Compare vids in each duplet to the inquiry vid - if a match is
	 * made, get the data value and update the soft state structure
	 * accordingly.
	 *
	 * Each duplet should show as a pair of strings, return SD_FAILURE
	 * otherwise.
	 */
	if (nelements & 1) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd-config-list should show as pairs of strings.\n");
		if (config_list)
			ddi_prop_free(config_list);
		return (SD_FAILURE);
	}

	/* Walk the list two entries (one duplet) at a time. */
	for (i = 0; i < nelements; i += 2) {
		/*
		 * Note: The assumption here is that each vid entry is on
		 * a unique line from its associated duplet.
		 */
		vidptr = config_list[i];
		vidlen = (int)strlen(vidptr);
		if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) {
			continue;
		}

		/*
		 * dnlist contains 1 or more blank separated
		 * data-property-name entries
		 */
		dnlist_ptr = config_list[i + 1];

		/* A ':' anywhere in the value marks the improved format. */
		if (strchr(dnlist_ptr, ':') != NULL) {
			/*
			 * Decode the improved format sd-config-list.
			 */
			sd_nvpair_str_decode(un, dnlist_ptr);
		} else {
			/*
			 * The old format sd-config-list, loop through all
			 * data-property-name entries in the
			 * data-property-name-list
			 * setting the properties for each.
			 */
			for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t",
			    &dataname_lasts); dataname_ptr != NULL;
			    dataname_ptr = sd_strtok_r(NULL, " \t",
			    &dataname_lasts)) {
				int version;

				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_process_sdconf_file: disk:%s, "
				    "data:%s\n", vidptr, dataname_ptr);

				/* Get the data list */
				if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
				    SD_DEVINFO(un), 0, dataname_ptr, &data_list,
				    &data_list_len) != DDI_PROP_SUCCESS) {
					SD_INFO(SD_LOG_ATTACH_DETACH, un,
					    "sd_process_sdconf_file: data "
					    "property (%s) has no value\n",
					    dataname_ptr);
					continue;
				}

				/* First int is the data format version. */
				version = data_list[0];

				if (version == SD_CONF_VERSION_1) {
					sd_tunables values;

					/* Set the properties */
					if (sd_chk_vers1_data(un, data_list[1],
					    &data_list[2], data_list_len,
					    dataname_ptr) == SD_SUCCESS) {
						sd_get_tunables_from_conf(un,
						    data_list[1], &data_list[2],
						    &values);
						sd_set_vers1_properties(un,
						    data_list[1], &values);
						rval = SD_SUCCESS;
					} else {
						rval = SD_FAILURE;
					}
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "data property %s version "
					    "0x%x is invalid.",
					    dataname_ptr, version);
					rval = SD_FAILURE;
				}
				/* Free the int array before the next token. */
				if (data_list)
					ddi_prop_free(data_list);
			}
		}
	}

	/* free up the memory allocated by ddi_prop_lookup_string_array(). */
	if (config_list) {
		ddi_prop_free(config_list);
	}

	return (rval);
}
3973 3973
3974 3974 /*
3975 3975 * Function: sd_nvpair_str_decode()
3976 3976 *
3977 3977 * Description: Parse the improved format sd-config-list to get
3978 3978 * each entry of tunable, which includes a name-value pair.
3979 3979 * Then call sd_set_properties() to set the property.
3980 3980 *
3981 3981 * Arguments: un - driver soft state (unit) structure
3982 3982 * nvpair_str - the tunable list
3983 3983 */
3984 3984 static void
3985 3985 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str)
3986 3986 {
3987 3987 char *nv, *name, *value, *token;
3988 3988 char *nv_lasts, *v_lasts, *x_lasts;
3989 3989
3990 3990 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL;
3991 3991 nv = sd_strtok_r(NULL, ",", &nv_lasts)) {
3992 3992 token = sd_strtok_r(nv, ":", &v_lasts);
3993 3993 name = sd_strtok_r(token, " \t", &x_lasts);
3994 3994 token = sd_strtok_r(NULL, ":", &v_lasts);
3995 3995 value = sd_strtok_r(token, " \t", &x_lasts);
3996 3996 if (name == NULL || value == NULL) {
3997 3997 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3998 3998 "sd_nvpair_str_decode: "
3999 3999 "name or value is not valid!\n");
4000 4000 } else {
4001 4001 sd_set_properties(un, name, value);
4002 4002 }
4003 4003 }
4004 4004 }
4005 4005
4006 4006 /*
4007 4007 * Function: sd_strtok_r()
4008 4008 *
4009 4009 * Description: This function uses strpbrk and strspn to break
4010 4010 * string into tokens on sequentially subsequent calls. Return
4011 4011 * NULL when no non-separator characters remain. The first
4012 4012 * argument is NULL for subsequent calls.
4013 4013 */
static char *
sd_strtok_r(char *string, const char *sepset, char **lasts)
{
	char *tok_start, *tok_end;

	/* On subsequent calls (string == NULL) resume from the saved spot. */
	if (string == NULL)
		string = *lasts;
	if (string == NULL)
		return (NULL);

	/* Advance past any leading separator characters. */
	tok_start = string + strspn(string, sepset);
	if (*tok_start == '\0')
		return (NULL);

	/*
	 * Terminate the token at the next separator and record where the
	 * following call should resume; a NULL *lasts marks exhaustion.
	 */
	tok_end = strpbrk(tok_start, sepset);
	if (tok_end == NULL) {
		*lasts = NULL;
	} else {
		*tok_end = '\0';
		*lasts = tok_end + 1;
	}
	return (tok_start);
}
4040 4040
4041 4041 /*
4042 4042 * Function: sd_set_properties()
4043 4043 *
4044 4044 * Description: Set device properties based on the improved
4045 4045 * format sd-config-list.
4046 4046 *
4047 4047 * Arguments: un - driver soft state (unit) structure
4048 4048 * name - supported tunable name
4049 4049 * value - tunable value
4050 4050 */
static void
sd_set_properties(struct sd_lun *un, char *name, char *value)
{
	char	*endptr = NULL;
	long	val = 0;

	/*
	 * Each recognized tunable is handled by its own strcasecmp()
	 * branch.  Most branches return immediately once the property
	 * is set; the throttle/rmw/physical-block-size branches
	 * deliberately do NOT return, so control falls through to the
	 * throttle validation near the bottom of this function.
	 */
	if (strcasecmp(name, "cache-nonvolatile") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_suppress_cache_flush = TRUE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_suppress_cache_flush = FALSE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "suppress_cache_flush flag set to %d\n",
		    un->un_f_suppress_cache_flush);
		return;
	}

	if (strcasecmp(name, "controller-type") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_ctype = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "ctype set to %d\n", un->un_ctype);
		return;
	}

	if (strcasecmp(name, "delay-busy") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			/*
			 * NOTE(review): val / 1000 is passed to
			 * drv_usectohz(), which takes microseconds, so the
			 * property value appears to be in nanoseconds —
			 * confirm the intended units.
			 */
			un->un_busy_timeout = drv_usectohz(val / 1000);
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "busy_timeout set to %d\n", un->un_busy_timeout);
		return;
	}

	if (strcasecmp(name, "disksort") == 0) {
		/* Note the inversion: "true" ENABLES disksort. */
		if (strcasecmp(value, "true") == 0) {
			un->un_f_disksort_disabled = FALSE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_disksort_disabled = TRUE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "disksort disabled flag set to %d\n",
		    un->un_f_disksort_disabled);
		return;
	}

	if (strcasecmp(name, "power-condition") == 0) {
		/* As above: "true" ENABLES power-condition support. */
		if (strcasecmp(value, "true") == 0) {
			un->un_f_power_condition_disabled = FALSE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_power_condition_disabled = TRUE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "power condition disabled flag set to %d\n",
		    un->un_f_power_condition_disabled);
		return;
	}

	if (strcasecmp(name, "timeout-releasereservation") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_reserve_release_time = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "reservation release timeout set to %d\n",
		    un->un_reserve_release_time);
		return;
	}

	if (strcasecmp(name, "reset-lun") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_lun_reset_enabled = TRUE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_lun_reset_enabled = FALSE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "lun reset enabled flag set to %d\n",
		    un->un_f_lun_reset_enabled);
		return;
	}

	if (strcasecmp(name, "retries-busy") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_busy_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "busy retry count set to %d\n", un->un_busy_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-timeout") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "timeout retry count set to %d\n", un->un_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-notready") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_notready_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "notready retry count set to %d\n",
		    un->un_notready_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-reset") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_reset_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "reset retry count set to %d\n",
		    un->un_reset_retry_count);
		return;
	}

	/*
	 * No return after the throttle-max/throttle-min branches: the new
	 * values must fall through to the validation block below so that
	 * an inconsistent pair is reset to the driver defaults.
	 */
	if (strcasecmp(name, "throttle-max") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_saved_throttle = un->un_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "throttle set to %d\n", un->un_throttle);
	}

	if (strcasecmp(name, "throttle-min") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_min_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "min throttle set to %d\n", un->un_min_throttle);
	}

	/*
	 * NOTE(review): the rmw-type and physical-block-size branches also
	 * fall through (no return) into the throttle validation below —
	 * harmless, but presumably not deliberate; confirm.
	 */
	if (strcasecmp(name, "rmw-type") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_f_rmw_type = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "RMW type set to %d\n", un->un_f_rmw_type);
	}

	if (strcasecmp(name, "physical-block-size") == 0) {
		/*
		 * The physical block size must be a power of two no smaller
		 * than either the target or the system block size.
		 */
		if (ddi_strtol(value, &endptr, 0, &val) == 0 &&
		    ISP2(val) && val >= un->un_tgt_blocksize &&
		    val >= un->un_sys_blocksize) {
			un->un_phy_blocksize = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "physical block size set to %d\n", un->un_phy_blocksize);
	}

	if (strcasecmp(name, "retries-victim") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_victim_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "victim retry count set to %d\n",
		    un->un_victim_retry_count);
		return;
	}

	/*
	 * Validate the throttle values.
	 * If any of the numbers are invalid, set everything to defaults.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}

	if (strcasecmp(name, "mmc-gesn-polling") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_mmc_gesn_polling = TRUE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_mmc_gesn_polling = FALSE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "mmc-gesn-polling set to %d\n",
		    un->un_f_mmc_gesn_polling);
	}

	return;

value_invalid:
	/* Common exit for any branch whose value failed to parse. */
	SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
	    "value of prop %s is invalid\n", name);
}
4277 4277
4278 4278 /*
4279 4279 * Function: sd_get_tunables_from_conf()
4280 4280 *
4281 4281 *
4282 4282 * This function reads the data list from the sd.conf file and pulls
4283 4283 * the values that can have numeric values as arguments and places
4284 4284 * the values in the appropriate sd_tunables member.
4285 4285 * Since the order of the data list members varies across platforms
4286 4286 * This function reads them from the data list in a platform specific
4287 4287 * order and places them into the correct sd_tunable member that is
4288 4288 * consistent across all platforms.
4289 4289 */
4290 4290 static void
4291 4291 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
4292 4292 sd_tunables *values)
4293 4293 {
4294 4294 int i;
4295 4295 int mask;
4296 4296
4297 4297 bzero(values, sizeof (sd_tunables));
4298 4298
4299 4299 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4300 4300
4301 4301 mask = 1 << i;
4302 4302 if (mask > flags) {
4303 4303 break;
4304 4304 }
4305 4305
4306 4306 switch (mask & flags) {
4307 4307 case 0: /* This mask bit not set in flags */
4308 4308 continue;
4309 4309 case SD_CONF_BSET_THROTTLE:
4310 4310 values->sdt_throttle = data_list[i];
4311 4311 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4312 4312 "sd_get_tunables_from_conf: throttle = %d\n",
4313 4313 values->sdt_throttle);
4314 4314 break;
4315 4315 case SD_CONF_BSET_CTYPE:
4316 4316 values->sdt_ctype = data_list[i];
4317 4317 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4318 4318 "sd_get_tunables_from_conf: ctype = %d\n",
4319 4319 values->sdt_ctype);
4320 4320 break;
4321 4321 case SD_CONF_BSET_NRR_COUNT:
4322 4322 values->sdt_not_rdy_retries = data_list[i];
4323 4323 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4324 4324 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4325 4325 values->sdt_not_rdy_retries);
4326 4326 break;
4327 4327 case SD_CONF_BSET_BSY_RETRY_COUNT:
4328 4328 values->sdt_busy_retries = data_list[i];
4329 4329 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4330 4330 "sd_get_tunables_from_conf: busy_retries = %d\n",
4331 4331 values->sdt_busy_retries);
4332 4332 break;
4333 4333 case SD_CONF_BSET_RST_RETRIES:
4334 4334 values->sdt_reset_retries = data_list[i];
4335 4335 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4336 4336 "sd_get_tunables_from_conf: reset_retries = %d\n",
4337 4337 values->sdt_reset_retries);
4338 4338 break;
4339 4339 case SD_CONF_BSET_RSV_REL_TIME:
4340 4340 values->sdt_reserv_rel_time = data_list[i];
4341 4341 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4342 4342 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4343 4343 values->sdt_reserv_rel_time);
4344 4344 break;
4345 4345 case SD_CONF_BSET_MIN_THROTTLE:
4346 4346 values->sdt_min_throttle = data_list[i];
4347 4347 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4348 4348 "sd_get_tunables_from_conf: min_throttle = %d\n",
4349 4349 values->sdt_min_throttle);
4350 4350 break;
4351 4351 case SD_CONF_BSET_DISKSORT_DISABLED:
4352 4352 values->sdt_disk_sort_dis = data_list[i];
4353 4353 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4354 4354 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4355 4355 values->sdt_disk_sort_dis);
4356 4356 break;
4357 4357 case SD_CONF_BSET_LUN_RESET_ENABLED:
4358 4358 values->sdt_lun_reset_enable = data_list[i];
4359 4359 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4360 4360 "sd_get_tunables_from_conf: lun_reset_enable = %d"
4361 4361 "\n", values->sdt_lun_reset_enable);
4362 4362 break;
4363 4363 case SD_CONF_BSET_CACHE_IS_NV:
4364 4364 values->sdt_suppress_cache_flush = data_list[i];
4365 4365 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4366 4366 "sd_get_tunables_from_conf: \
4367 4367 suppress_cache_flush = %d"
4368 4368 "\n", values->sdt_suppress_cache_flush);
4369 4369 break;
4370 4370 case SD_CONF_BSET_PC_DISABLED:
4371 4371 values->sdt_disk_sort_dis = data_list[i];
4372 4372 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4373 4373 "sd_get_tunables_from_conf: power_condition_dis = "
4374 4374 "%d\n", values->sdt_power_condition_dis);
4375 4375 break;
4376 4376 }
4377 4377 }
4378 4378 }
4379 4379
4380 4380 /*
4381 4381 * Function: sd_process_sdconf_table
4382 4382 *
4383 4383 * Description: Search the static configuration table for a match on the
4384 4384 * inquiry vid/pid and update the driver soft state structure
4385 4385 * according to the table property values for the device.
4386 4386 *
4387 4387 * The form of a configuration table entry is:
4388 4388 * <vid+pid>,<flags>,<property-data>
4389 4389 * "SEAGATE ST42400N",1,0x40000,
4390 4390 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4391 4391 *
4392 4392 * Arguments: un - driver soft state (unit) structure
4393 4393 */
4394 4394
4395 4395 static void
4396 4396 sd_process_sdconf_table(struct sd_lun *un)
4397 4397 {
4398 4398 char *id = NULL;
4399 4399 int table_index;
4400 4400 int idlen;
4401 4401
4402 4402 ASSERT(un != NULL);
4403 4403 for (table_index = 0; table_index < sd_disk_table_size;
4404 4404 table_index++) {
4405 4405 id = sd_disk_table[table_index].device_id;
4406 4406 idlen = strlen(id);
4407 4407
4408 4408 /*
4409 4409 * The static configuration table currently does not
4410 4410 * implement version 10 properties. Additionally,
4411 4411 * multiple data-property-name entries are not
4412 4412 * implemented in the static configuration table.
4413 4413 */
4414 4414 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4415 4415 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4416 4416 "sd_process_sdconf_table: disk %s\n", id);
4417 4417 sd_set_vers1_properties(un,
4418 4418 sd_disk_table[table_index].flags,
4419 4419 sd_disk_table[table_index].properties);
4420 4420 break;
4421 4421 }
4422 4422 }
4423 4423 }
4424 4424
4425 4425
4426 4426 /*
4427 4427 * Function: sd_sdconf_id_match
4428 4428 *
4429 4429 * Description: This local function implements a case sensitive vid/pid
4430 4430 * comparison as well as the boundary cases of wild card and
4431 4431 * multiple blanks.
4432 4432 *
4433 4433 * Note: An implicit assumption made here is that the scsi
4434 4434 * inquiry structure will always keep the vid, pid and
4435 4435 * revision strings in consecutive sequence, so they can be
4436 4436 * read as a single string. If this assumption is not the
4437 4437 * case, a separate string, to be used for the check, needs
4438 4438 * to be built with these strings concatenated.
4439 4439 *
4440 4440 * Arguments: un - driver soft state (unit) structure
4441 4441 * id - table or config file vid/pid
4442 4442 * idlen - length of the vid/pid (bytes)
4443 4443 *
4444 4444 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4445 4445 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4446 4446 */
4447 4447
static int
sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
{
	struct scsi_inquiry	*sd_inq;
	int 			rval = SD_SUCCESS;

	ASSERT(un != NULL);
	sd_inq = un->un_sd->sd_inq;
	ASSERT(id != NULL);

	/*
	 * We use the inq_vid as a pointer to a buffer containing the
	 * vid and pid and use the entire vid/pid length of the table
	 * entry for the comparison. This works because the inq_pid
	 * data member follows inq_vid in the scsi_inquiry structure.
	 */
	if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
		/*
		 * The user id string is compared to the inquiry vid/pid
		 * using a case insensitive comparison and ignoring
		 * multiple spaces.
		 */
		rval = sd_blank_cmp(un, id, idlen);
		if (rval != SD_SUCCESS) {
			/*
			 * User id strings that start and end with a "*"
			 * are a special case. These do not have a
			 * specific vendor, and the product string can
			 * appear anywhere in the 16 byte PID portion of
			 * the inquiry data. This is a simple strstr()
			 * type search for the user id in the inquiry data.
			 */
			if ((id[0] == '*') && (id[idlen - 1] == '*')) {
				char	*pidptr = &id[1];
				int	i;
				int	j;
				/* Length of the pattern between the '*'s. */
				int	pidstrlen = idlen - 2;
				/* Number of candidate start offsets in inq_pid. */
				j = sizeof (SD_INQUIRY(un)->inq_pid) -
				    pidstrlen;

				/* Pattern longer than inq_pid: cannot match. */
				if (j < 0) {
					return (SD_FAILURE);
				}
				/*
				 * NOTE(review): valid start offsets are
				 * 0..j inclusive; "i < j" skips the final
				 * offset, so a pattern ending exactly at
				 * the end of inq_pid is never matched —
				 * confirm whether this is intentional.
				 */
				for (i = 0; i < j; i++) {
					if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
					    pidptr, pidstrlen) == 0) {
						rval = SD_SUCCESS;
						break;
					}
				}
			}
		}
	}
	return (rval);
}
4503 4503
4504 4504
4505 4505 /*
4506 4506 * Function: sd_blank_cmp
4507 4507 *
4508 4508 * Description: If the id string starts and ends with a space, treat
4509 4509 * multiple consecutive spaces as equivalent to a single
4510 4510 * space. For example, this causes a sd_disk_table entry
4511 4511 * of " NEC CDROM " to match a device's id string of
4512 4512 * "NEC CDROM".
4513 4513 *
4514 4514 * Note: The success exit condition for this routine is if
4515 4515 * the pointer to the table entry is '\0' and the cnt of
4516 4516 * the inquiry length is zero. This will happen if the inquiry
4517 4517 * string returned by the device is padded with spaces to be
4518 4518 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The
4519 4519 * SCSI spec states that the inquiry string is to be padded with
4520 4520 * spaces.
4521 4521 *
4522 4522 * Arguments: un - driver soft state (unit) structure
4523 4523 * id - table or config file vid/pid
4524 4524 * idlen - length of the vid/pid (bytes)
4525 4525 *
4526 4526 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4527 4527 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4528 4528 */
4529 4529
static int
sd_blank_cmp(struct sd_lun *un, char *id, int idlen)
{
	char *p1;
	char *p2;
	int cnt;
	/*
	 * Total bytes of inquiry data to compare: vid immediately followed
	 * by pid (the same adjacency sd_sdconf_id_match relies on).
	 */
	cnt = sizeof (SD_INQUIRY(un)->inq_vid) +
	    sizeof (SD_INQUIRY(un)->inq_pid);

	ASSERT(un != NULL);
	p2 = un->un_sd->sd_inq->inq_vid;
	ASSERT(id != NULL);
	p1 = id;

	/*
	 * The blank-collapsing comparison only applies to table ids that
	 * are bracketed by spaces; otherwise p1/p2/cnt are left untouched
	 * and the final test below reports SD_FAILURE.
	 */
	if ((id[0] == ' ') && (id[idlen - 1] == ' ')) {
		/*
		 * Note: string p1 is terminated by a NUL but string p2
		 * isn't.  The end of p2 is determined by cnt.
		 */
		for (;;) {
			/* skip over any extra blanks in both strings */
			while ((*p1 != '\0') && (*p1 == ' ')) {
				p1++;
			}
			while ((cnt != 0) && (*p2 == ' ')) {
				p2++;
				cnt--;
			}

			/* compare the two strings */
			/* Exit when p2 is exhausted or a mismatch is found. */
			if ((cnt == 0) ||
			    (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) {
				break;
			}
			/* Advance both pointers over a run of matching chars. */
			while ((cnt > 0) &&
			    (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) {
				p1++;
				p2++;
				cnt--;
			}
		}
	}

	/* return SD_SUCCESS if both strings match */
	/* Success means p1 fully consumed exactly as p2's cnt reaches zero. */
	return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE);
}
4576 4576
4577 4577
4578 4578 /*
4579 4579 * Function: sd_chk_vers1_data
4580 4580 *
4581 4581 * Description: Verify the version 1 device properties provided by the
4582 4582 * user via the configuration file
4583 4583 *
4584 4584 * Arguments: un - driver soft state (unit) structure
4585 4585 * flags - integer mask indicating properties to be set
4586 4586 * prop_list - integer list of property values
4587 4587 * list_len - number of the elements
4588 4588 *
4589 4589 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4590 4590 * SD_FAILURE - Indicates the user provided data is invalid
4591 4591 */
4592 4592
4593 4593 static int
4594 4594 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4595 4595 int list_len, char *dataname_ptr)
4596 4596 {
4597 4597 int i;
4598 4598 int mask = 1;
4599 4599 int index = 0;
4600 4600
4601 4601 ASSERT(un != NULL);
4602 4602
4603 4603 /* Check for a NULL property name and list */
4604 4604 if (dataname_ptr == NULL) {
4605 4605 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4606 4606 "sd_chk_vers1_data: NULL data property name.");
4607 4607 return (SD_FAILURE);
4608 4608 }
4609 4609 if (prop_list == NULL) {
4610 4610 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4611 4611 "sd_chk_vers1_data: %s NULL data property list.",
4612 4612 dataname_ptr);
4613 4613 return (SD_FAILURE);
4614 4614 }
4615 4615
4616 4616 /* Display a warning if undefined bits are set in the flags */
4617 4617 if (flags & ~SD_CONF_BIT_MASK) {
4618 4618 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4619 4619 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4620 4620 "Properties not set.",
4621 4621 (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4622 4622 return (SD_FAILURE);
4623 4623 }
4624 4624
4625 4625 /*
4626 4626 * Verify the length of the list by identifying the highest bit set
4627 4627 * in the flags and validating that the property list has a length
4628 4628 * up to the index of this bit.
4629 4629 */
4630 4630 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4631 4631 if (flags & mask) {
4632 4632 index++;
4633 4633 }
4634 4634 mask = 1 << i;
4635 4635 }
4636 4636 if (list_len < (index + 2)) {
4637 4637 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4638 4638 "sd_chk_vers1_data: "
4639 4639 "Data property list %s size is incorrect. "
4640 4640 "Properties not set.", dataname_ptr);
4641 4641 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4642 4642 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4643 4643 return (SD_FAILURE);
4644 4644 }
4645 4645 return (SD_SUCCESS);
4646 4646 }
4647 4647
4648 4648
4649 4649 /*
4650 4650 * Function: sd_set_vers1_properties
4651 4651 *
4652 4652 * Description: Set version 1 device properties based on a property list
4653 4653 * retrieved from the driver configuration file or static
4654 4654 * configuration table. Version 1 properties have the format:
4655 4655 *
4656 4656 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4657 4657 *
4658 4658 * where the prop0 value will be used to set prop0 if bit0
4659 4659 * is set in the flags
4660 4660 *
4661 4661 * Arguments: un - driver soft state (unit) structure
4662 4662 * flags - integer mask indicating properties to be set
4663 4663 * prop_list - integer list of property values
4664 4664 */
4665 4665
static void
sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
{
	ASSERT(un != NULL);

	/*
	 * Set the flag to indicate cache is to be disabled. An attempt
	 * to disable the cache via sd_cache_control() will be made
	 * later during attach once the basic initialization is complete.
	 */
	if (flags & SD_CONF_BSET_NOCACHE) {
		un->un_f_opt_disable_cache = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: caching disabled flag set\n");
	}

	/* CD-specific configuration parameters */
	if (flags & SD_CONF_BSET_PLAYMSF_BCD) {
		un->un_f_cfg_playmsf_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: playmsf_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READSUB_BCD) {
		un->un_f_cfg_readsub_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: readsub_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) {
		un->un_f_cfg_read_toc_trk_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_toc_trk_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) {
		un->un_f_cfg_read_toc_addr_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_toc_addr_bcd set\n");
	}
	if (flags & SD_CONF_BSET_NO_READ_HEADER) {
		un->un_f_cfg_no_read_header = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: no_read_header set\n");
	}
	if (flags & SD_CONF_BSET_READ_CD_XD4) {
		un->un_f_cfg_read_cd_xd4 = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_cd_xd4 set\n");
	}

	/* Support for devices which do not have valid/unique serial numbers */
	if (flags & SD_CONF_BSET_FAB_DEVID) {
		un->un_f_opt_fab_devid = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: fab_devid bit set\n");
	}

	/* Support for user throttle configuration */
	if (flags & SD_CONF_BSET_THROTTLE) {
		ASSERT(prop_list != NULL);
		un->un_saved_throttle = un->un_throttle =
		    prop_list->sdt_throttle;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: throttle set to %d\n",
		    prop_list->sdt_throttle);
	}

	/* Set the per disk retry count according to the conf file or table. */
	if (flags & SD_CONF_BSET_NRR_COUNT) {
		ASSERT(prop_list != NULL);
		/* A zero value is ignored, keeping the driver default. */
		if (prop_list->sdt_not_rdy_retries) {
			un->un_notready_retry_count =
			    prop_list->sdt_not_rdy_retries;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: not ready retry count"
			    " set to %d\n", un->un_notready_retry_count);
		}
	}

	/* The controller type is reported for generic disk driver ioctls */
	if (flags & SD_CONF_BSET_CTYPE) {
		ASSERT(prop_list != NULL);
		/* Only the known controller types are accepted. */
		switch (prop_list->sdt_ctype) {
		case CTYPE_CDROM:
			un->un_ctype = prop_list->sdt_ctype;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: ctype set to "
			    "CTYPE_CDROM\n");
			break;
		case CTYPE_CCS:
			un->un_ctype = prop_list->sdt_ctype;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: ctype set to "
			    "CTYPE_CCS\n");
			break;
		case CTYPE_ROD:		/* RW optical */
			un->un_ctype = prop_list->sdt_ctype;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: ctype set to "
			    "CTYPE_ROD\n");
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sd_set_vers1_properties: Could not set "
			    "invalid ctype value (%d)",
			    prop_list->sdt_ctype);
		}
	}

	/* Purple failover timeout */
	if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) {
		ASSERT(prop_list != NULL);
		un->un_busy_retry_count =
		    prop_list->sdt_busy_retries;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: "
		    "busy retry count set to %d\n",
		    un->un_busy_retry_count);
	}

	/* Purple reset retry count */
	if (flags & SD_CONF_BSET_RST_RETRIES) {
		ASSERT(prop_list != NULL);
		un->un_reset_retry_count =
		    prop_list->sdt_reset_retries;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: "
		    "reset retry count set to %d\n",
		    un->un_reset_retry_count);
	}

	/* Purple reservation release timeout */
	if (flags & SD_CONF_BSET_RSV_REL_TIME) {
		ASSERT(prop_list != NULL);
		un->un_reserve_release_time =
		    prop_list->sdt_reserv_rel_time;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: "
		    "reservation release timeout set to %d\n",
		    un->un_reserve_release_time);
	}

	/*
	 * Driver flag telling the driver to verify that no commands are pending
	 * for a device before issuing a Test Unit Ready. This is a workaround
	 * for a firmware bug in some Seagate eliteI drives.
	 */
	if (flags & SD_CONF_BSET_TUR_CHECK) {
		un->un_f_cfg_tur_check = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: tur queue check set\n");
	}

	/*
	 * NOTE(review): unlike the branches above, the remaining branches
	 * dereference prop_list without an ASSERT(prop_list != NULL) —
	 * presumably an oversight; confirm and align if so.
	 */
	if (flags & SD_CONF_BSET_MIN_THROTTLE) {
		un->un_min_throttle = prop_list->sdt_min_throttle;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: min throttle set to %d\n",
		    un->un_min_throttle);
	}

	if (flags & SD_CONF_BSET_DISKSORT_DISABLED) {
		un->un_f_disksort_disabled =
		    (prop_list->sdt_disk_sort_dis != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: disksort disabled "
		    "flag set to %d\n",
		    prop_list->sdt_disk_sort_dis);
	}

	if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) {
		un->un_f_lun_reset_enabled =
		    (prop_list->sdt_lun_reset_enable != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: lun reset enabled "
		    "flag set to %d\n",
		    prop_list->sdt_lun_reset_enable);
	}

	if (flags & SD_CONF_BSET_CACHE_IS_NV) {
		un->un_f_suppress_cache_flush =
		    (prop_list->sdt_suppress_cache_flush != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: suppress_cache_flush "
		    "flag set to %d\n",
		    prop_list->sdt_suppress_cache_flush);
	}

	if (flags & SD_CONF_BSET_PC_DISABLED) {
		un->un_f_power_condition_disabled =
		    (prop_list->sdt_power_condition_dis != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: power_condition_disabled "
		    "flag set to %d\n",
		    prop_list->sdt_power_condition_dis);
	}

	/*
	 * Validate the throttle values.
	 * If any of the numbers are invalid, set everything to defaults.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}
}
4875 4875
4876 4876 /*
4877 4877 * Function: sd_is_lsi()
4878 4878 *
4879 4879 * Description: Check for lsi devices, step through the static device
4880 4880 * table to match vid/pid.
4881 4881 *
4882 4882 * Args: un - ptr to sd_lun
4883 4883 *
4884 4884 * Notes: When creating new LSI property, need to add the new LSI property
4885 4885 * to this function.
4886 4886 */
4887 4887 static void
4888 4888 sd_is_lsi(struct sd_lun *un)
4889 4889 {
4890 4890 char *id = NULL;
4891 4891 int table_index;
4892 4892 int idlen;
4893 4893 void *prop;
4894 4894
4895 4895 ASSERT(un != NULL);
4896 4896 for (table_index = 0; table_index < sd_disk_table_size;
4897 4897 table_index++) {
4898 4898 id = sd_disk_table[table_index].device_id;
4899 4899 idlen = strlen(id);
4900 4900 if (idlen == 0) {
4901 4901 continue;
4902 4902 }
4903 4903
4904 4904 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4905 4905 prop = sd_disk_table[table_index].properties;
4906 4906 if (prop == &lsi_properties ||
4907 4907 prop == &lsi_oem_properties ||
4908 4908 prop == &lsi_properties_scsi ||
4909 4909 prop == &symbios_properties) {
4910 4910 un->un_f_cfg_is_lsi = TRUE;
4911 4911 }
4912 4912 break;
4913 4913 }
4914 4914 }
4915 4915 }
4916 4916
4917 4917 /*
4918 4918 * Function: sd_get_physical_geometry
4919 4919 *
4920 4920 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
4921 4921 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
4922 4922 * target, and use this information to initialize the physical
4923 4923 * geometry cache specified by pgeom_p.
4924 4924 *
4925 4925 * MODE SENSE is an optional command, so failure in this case
4926 4926 * does not necessarily denote an error. We want to use the
4927 4927 * MODE SENSE commands to derive the physical geometry of the
4928 4928 * device, but if either command fails, the logical geometry is
4929 4929 * used as the fallback for disk label geometry in cmlb.
4930 4930 *
4931 4931 * This requires that un->un_blockcount and un->un_tgt_blocksize
4932 4932 * have already been initialized for the current target and
4933 4933 * that the current values be passed as args so that we don't
4934 4934 * end up ever trying to use -1 as a valid value. This could
4935 4935 * happen if either value is reset while we're not holding
4936 4936 * the mutex.
4937 4937 *
4938 4938 * Arguments: un - driver soft state (unit) structure
4939 4939 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4940 4940 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4941 4941 * to use the USCSI "direct" chain and bypass the normal
4942 4942 * command waitq.
4943 4943 *
4944 4944 * Context: Kernel thread only (can sleep).
4945 4945 */
4946 4946
4947 4947 static int
4948 4948 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p,
4949 4949 diskaddr_t capacity, int lbasize, int path_flag)
4950 4950 {
4951 4951 struct mode_format *page3p;
4952 4952 struct mode_geometry *page4p;
4953 4953 struct mode_header *headerp;
4954 4954 int sector_size;
4955 4955 int nsect;
4956 4956 int nhead;
4957 4957 int ncyl;
4958 4958 int intrlv;
4959 4959 int spc;
4960 4960 diskaddr_t modesense_capacity;
4961 4961 int rpm;
4962 4962 int bd_len;
4963 4963 int mode_header_length;
4964 4964 uchar_t *p3bufp;
4965 4965 uchar_t *p4bufp;
4966 4966 int cdbsize;
4967 4967 int ret = EIO;
4968 4968 sd_ssc_t *ssc;
4969 4969 int status;
4970 4970
4971 4971 ASSERT(un != NULL);
4972 4972
4973 4973 if (lbasize == 0) {
4974 4974 if (ISCD(un)) {
4975 4975 lbasize = 2048;
4976 4976 } else {
4977 4977 lbasize = un->un_sys_blocksize;
4978 4978 }
4979 4979 }
4980 4980 pgeom_p->g_secsize = (unsigned short)lbasize;
4981 4981
4982 4982 /*
4983 4983 * If the unit is a cd/dvd drive MODE SENSE page three
4984 4984 * and MODE SENSE page four are reserved (see SBC spec
4985 4985 * and MMC spec). To prevent soft errors just return
4986 4986 * using the default LBA size.
4987 4987 */
4988 4988 if (ISCD(un))
4989 4989 return (ret);
4990 4990
4991 4991 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0;
4992 4992
4993 4993 /*
4994 4994 * Retrieve MODE SENSE page 3 - Format Device Page
4995 4995 */
4996 4996 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP);
4997 4997 ssc = sd_ssc_init(un);
4998 4998 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp,
4999 4999 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag);
5000 5000 if (status != 0) {
5001 5001 SD_ERROR(SD_LOG_COMMON, un,
5002 5002 "sd_get_physical_geometry: mode sense page 3 failed\n");
5003 5003 goto page3_exit;
5004 5004 }
5005 5005
5006 5006 /*
5007 5007 * Determine size of Block Descriptors in order to locate the mode
5008 5008 * page data. ATAPI devices return 0, SCSI devices should return
5009 5009 * MODE_BLK_DESC_LENGTH.
5010 5010 */
5011 5011 headerp = (struct mode_header *)p3bufp;
5012 5012 if (un->un_f_cfg_is_atapi == TRUE) {
5013 5013 struct mode_header_grp2 *mhp =
5014 5014 (struct mode_header_grp2 *)headerp;
5015 5015 mode_header_length = MODE_HEADER_LENGTH_GRP2;
5016 5016 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5017 5017 } else {
5018 5018 mode_header_length = MODE_HEADER_LENGTH;
5019 5019 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5020 5020 }
5021 5021
5022 5022 if (bd_len > MODE_BLK_DESC_LENGTH) {
5023 5023 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5024 5024 "sd_get_physical_geometry: received unexpected bd_len "
5025 5025 "of %d, page3\n", bd_len);
5026 5026 status = EIO;
5027 5027 goto page3_exit;
5028 5028 }
5029 5029
5030 5030 page3p = (struct mode_format *)
5031 5031 ((caddr_t)headerp + mode_header_length + bd_len);
5032 5032
5033 5033 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) {
5034 5034 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5035 5035 "sd_get_physical_geometry: mode sense pg3 code mismatch "
5036 5036 "%d\n", page3p->mode_page.code);
5037 5037 status = EIO;
5038 5038 goto page3_exit;
5039 5039 }
5040 5040
5041 5041 /*
5042 5042 * Use this physical geometry data only if BOTH MODE SENSE commands
5043 5043 * complete successfully; otherwise, revert to the logical geometry.
5044 5044 * So, we need to save everything in temporary variables.
5045 5045 */
5046 5046 sector_size = BE_16(page3p->data_bytes_sect);
5047 5047
5048 5048 /*
5049 5049 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size
5050 5050 */
5051 5051 if (sector_size == 0) {
5052 5052 sector_size = un->un_sys_blocksize;
5053 5053 } else {
5054 5054 sector_size &= ~(un->un_sys_blocksize - 1);
5055 5055 }
5056 5056
5057 5057 nsect = BE_16(page3p->sect_track);
5058 5058 intrlv = BE_16(page3p->interleave);
5059 5059
5060 5060 SD_INFO(SD_LOG_COMMON, un,
5061 5061 "sd_get_physical_geometry: Format Parameters (page 3)\n");
5062 5062 SD_INFO(SD_LOG_COMMON, un,
5063 5063 " mode page: %d; nsect: %d; sector size: %d;\n",
5064 5064 page3p->mode_page.code, nsect, sector_size);
5065 5065 SD_INFO(SD_LOG_COMMON, un,
5066 5066 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv,
5067 5067 BE_16(page3p->track_skew),
5068 5068 BE_16(page3p->cylinder_skew));
5069 5069
5070 5070 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5071 5071
5072 5072 /*
5073 5073 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page
5074 5074 */
5075 5075 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP);
5076 5076 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp,
5077 5077 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag);
5078 5078 if (status != 0) {
5079 5079 SD_ERROR(SD_LOG_COMMON, un,
5080 5080 "sd_get_physical_geometry: mode sense page 4 failed\n");
5081 5081 goto page4_exit;
5082 5082 }
5083 5083
5084 5084 /*
5085 5085 * Determine size of Block Descriptors in order to locate the mode
5086 5086 * page data. ATAPI devices return 0, SCSI devices should return
5087 5087 * MODE_BLK_DESC_LENGTH.
5088 5088 */
5089 5089 headerp = (struct mode_header *)p4bufp;
5090 5090 if (un->un_f_cfg_is_atapi == TRUE) {
5091 5091 struct mode_header_grp2 *mhp =
5092 5092 (struct mode_header_grp2 *)headerp;
5093 5093 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5094 5094 } else {
5095 5095 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5096 5096 }
5097 5097
5098 5098 if (bd_len > MODE_BLK_DESC_LENGTH) {
5099 5099 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5100 5100 "sd_get_physical_geometry: received unexpected bd_len of "
5101 5101 "%d, page4\n", bd_len);
5102 5102 status = EIO;
5103 5103 goto page4_exit;
5104 5104 }
5105 5105
5106 5106 page4p = (struct mode_geometry *)
5107 5107 ((caddr_t)headerp + mode_header_length + bd_len);
5108 5108
5109 5109 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) {
5110 5110 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5111 5111 "sd_get_physical_geometry: mode sense pg4 code mismatch "
5112 5112 "%d\n", page4p->mode_page.code);
5113 5113 status = EIO;
5114 5114 goto page4_exit;
5115 5115 }
5116 5116
5117 5117 /*
5118 5118 * Stash the data now, after we know that both commands completed.
5119 5119 */
5120 5120
5121 5121
5122 5122 nhead = (int)page4p->heads; /* uchar, so no conversion needed */
5123 5123 spc = nhead * nsect;
5124 5124 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb;
5125 5125 rpm = BE_16(page4p->rpm);
5126 5126
5127 5127 modesense_capacity = spc * ncyl;
5128 5128
5129 5129 SD_INFO(SD_LOG_COMMON, un,
5130 5130 "sd_get_physical_geometry: Geometry Parameters (page 4)\n");
5131 5131 SD_INFO(SD_LOG_COMMON, un,
5132 5132 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm);
5133 5133 SD_INFO(SD_LOG_COMMON, un,
5134 5134 " computed capacity(h*s*c): %d;\n", modesense_capacity);
5135 5135 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n",
5136 5136 (void *)pgeom_p, capacity);
5137 5137
5138 5138 /*
5139 5139 * Compensate if the drive's geometry is not rectangular, i.e.,
5140 5140 * the product of C * H * S returned by MODE SENSE >= that returned
5141 5141 * by read capacity. This is an idiosyncrasy of the original x86
5142 5142 * disk subsystem.
5143 5143 */
5144 5144 if (modesense_capacity >= capacity) {
5145 5145 SD_INFO(SD_LOG_COMMON, un,
5146 5146 "sd_get_physical_geometry: adjusting acyl; "
5147 5147 "old: %d; new: %d\n", pgeom_p->g_acyl,
5148 5148 (modesense_capacity - capacity + spc - 1) / spc);
5149 5149 if (sector_size != 0) {
5150 5150 /* 1243403: NEC D38x7 drives don't support sec size */
5151 5151 pgeom_p->g_secsize = (unsigned short)sector_size;
5152 5152 }
5153 5153 pgeom_p->g_nsect = (unsigned short)nsect;
5154 5154 pgeom_p->g_nhead = (unsigned short)nhead;
5155 5155 pgeom_p->g_capacity = capacity;
5156 5156 pgeom_p->g_acyl =
5157 5157 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc;
5158 5158 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl;
5159 5159 }
5160 5160
5161 5161 pgeom_p->g_rpm = (unsigned short)rpm;
5162 5162 pgeom_p->g_intrlv = (unsigned short)intrlv;
5163 5163 ret = 0;
5164 5164
5165 5165 SD_INFO(SD_LOG_COMMON, un,
5166 5166 "sd_get_physical_geometry: mode sense geometry:\n");
5167 5167 SD_INFO(SD_LOG_COMMON, un,
5168 5168 " nsect: %d; sector size: %d; interlv: %d\n",
5169 5169 nsect, sector_size, intrlv);
5170 5170 SD_INFO(SD_LOG_COMMON, un,
5171 5171 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
5172 5172 nhead, ncyl, rpm, modesense_capacity);
5173 5173 SD_INFO(SD_LOG_COMMON, un,
5174 5174 "sd_get_physical_geometry: (cached)\n");
5175 5175 SD_INFO(SD_LOG_COMMON, un,
5176 5176 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
5177 5177 pgeom_p->g_ncyl, pgeom_p->g_acyl,
5178 5178 pgeom_p->g_nhead, pgeom_p->g_nsect);
5179 5179 SD_INFO(SD_LOG_COMMON, un,
5180 5180 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
5181 5181 pgeom_p->g_secsize, pgeom_p->g_capacity,
5182 5182 pgeom_p->g_intrlv, pgeom_p->g_rpm);
5183 5183 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5184 5184
5185 5185 page4_exit:
5186 5186 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH);
5187 5187
5188 5188 page3_exit:
5189 5189 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH);
5190 5190
5191 5191 if (status != 0) {
5192 5192 if (status == EIO) {
5193 5193 /*
5194 5194 * Some disks do not support mode sense(6), we
5195 5195 * should ignore this kind of error(sense key is
5196 5196 * 0x5 - illegal request).
5197 5197 */
5198 5198 uint8_t *sensep;
5199 5199 int senlen;
5200 5200
5201 5201 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
5202 5202 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
5203 5203 ssc->ssc_uscsi_cmd->uscsi_rqresid);
5204 5204
5205 5205 if (senlen > 0 &&
5206 5206 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
5207 5207 sd_ssc_assessment(ssc,
5208 5208 SD_FMT_IGNORE_COMPROMISE);
5209 5209 } else {
5210 5210 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
5211 5211 }
5212 5212 } else {
5213 5213 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5214 5214 }
5215 5215 }
5216 5216 sd_ssc_fini(ssc);
5217 5217 return (ret);
5218 5218 }
5219 5219
5220 5220 /*
5221 5221 * Function: sd_get_virtual_geometry
5222 5222 *
5223 5223 * Description: Ask the controller to tell us about the target device.
5224 5224 *
5225 5225 * Arguments: un - pointer to softstate
5226 5226 * capacity - disk capacity in #blocks
5227 5227 * lbasize - disk block size in bytes
5228 5228 *
5229 5229 * Context: Kernel thread only
5230 5230 */
5231 5231
5232 5232 static int
5233 5233 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p,
5234 5234 diskaddr_t capacity, int lbasize)
5235 5235 {
5236 5236 uint_t geombuf;
5237 5237 int spc;
5238 5238
5239 5239 ASSERT(un != NULL);
5240 5240
5241 5241 /* Set sector size, and total number of sectors */
5242 5242 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1);
5243 5243 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1);
5244 5244
5245 5245 /* Let the HBA tell us its geometry */
5246 5246 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1);
5247 5247
5248 5248 /* A value of -1 indicates an undefined "geometry" property */
5249 5249 if (geombuf == (-1)) {
5250 5250 return (EINVAL);
5251 5251 }
5252 5252
5253 5253 /* Initialize the logical geometry cache. */
5254 5254 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
5255 5255 lgeom_p->g_nsect = geombuf & 0xffff;
5256 5256 lgeom_p->g_secsize = un->un_sys_blocksize;
5257 5257
5258 5258 spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
5259 5259
5260 5260 /*
5261 5261 * Note: The driver originally converted the capacity value from
5262 5262 * target blocks to system blocks. However, the capacity value passed
5263 5263 * to this routine is already in terms of system blocks (this scaling
5264 5264 * is done when the READ CAPACITY command is issued and processed).
5265 5265 * This 'error' may have gone undetected because the usage of g_ncyl
5266 5266 * (which is based upon g_capacity) is very limited within the driver
5267 5267 */
5268 5268 lgeom_p->g_capacity = capacity;
5269 5269
5270 5270 /*
5271 5271 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The
5272 5272 * hba may return zero values if the device has been removed.
5273 5273 */
5274 5274 if (spc == 0) {
5275 5275 lgeom_p->g_ncyl = 0;
5276 5276 } else {
5277 5277 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
5278 5278 }
5279 5279 lgeom_p->g_acyl = 0;
5280 5280
5281 5281 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
5282 5282 return (0);
5283 5283
5284 5284 }
5285 5285 /*
5286 5286 * Function: sd_update_block_info
5287 5287 *
5288 5288 * Description: Calculate a byte count to sector count bitshift value
5289 5289 * from sector size.
5290 5290 *
5291 5291 * Arguments: un: unit struct.
5292 5292 * lbasize: new target sector size
5293 5293 * capacity: new target capacity, ie. block count
5294 5294 *
5295 5295 * Context: Kernel thread context
5296 5296 */
5297 5297
5298 5298 static void
5299 5299 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
5300 5300 {
5301 5301 if (lbasize != 0) {
5302 5302 un->un_tgt_blocksize = lbasize;
5303 5303 un->un_f_tgt_blocksize_is_valid = TRUE;
5304 5304 if (!un->un_f_has_removable_media) {
5305 5305 un->un_sys_blocksize = lbasize;
5306 5306 }
5307 5307 }
5308 5308
5309 5309 if (capacity != 0) {
5310 5310 un->un_blockcount = capacity;
5311 5311 un->un_f_blockcount_is_valid = TRUE;
5312 5312
5313 5313 /*
5314 5314 * The capacity has changed so update the errstats.
5315 5315 */
5316 5316 if (un->un_errstats != NULL) {
5317 5317 struct sd_errstats *stp;
5318 5318
5319 5319 capacity *= un->un_sys_blocksize;
5320 5320 stp = (struct sd_errstats *)un->un_errstats->ks_data;
5321 5321 if (stp->sd_capacity.value.ui64 < capacity)
5322 5322 stp->sd_capacity.value.ui64 = capacity;
5323 5323 }
5324 5324 }
5325 5325 }
5326 5326
5327 5327
5328 5328 /*
5329 5329 * Function: sd_register_devid
5330 5330 *
5331 5331 * Description: This routine will obtain the device id information from the
5332 5332 * target, obtain the serial number, and register the device
5333 5333 * id with the ddi framework.
5334 5334 *
5335 5335 * Arguments: devi - the system's dev_info_t for the device.
5336 5336 * un - driver soft state (unit) structure
5337 5337 * reservation_flag - indicates if a reservation conflict
5338 5338 * occurred during attach
5339 5339 *
5340 5340 * Context: Kernel Thread
5341 5341 */
5342 5342 static void
5343 5343 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
5344 5344 {
5345 5345 int rval = 0;
5346 5346 uchar_t *inq80 = NULL;
5347 5347 size_t inq80_len = MAX_INQUIRY_SIZE;
5348 5348 size_t inq80_resid = 0;
5349 5349 uchar_t *inq83 = NULL;
5350 5350 size_t inq83_len = MAX_INQUIRY_SIZE;
5351 5351 size_t inq83_resid = 0;
5352 5352 int dlen, len;
5353 5353 char *sn;
5354 5354 struct sd_lun *un;
5355 5355
5356 5356 ASSERT(ssc != NULL);
5357 5357 un = ssc->ssc_un;
5358 5358 ASSERT(un != NULL);
5359 5359 ASSERT(mutex_owned(SD_MUTEX(un)));
5360 5360 ASSERT((SD_DEVINFO(un)) == devi);
5361 5361
5362 5362
5363 5363 /*
5364 5364 * We check the availability of the World Wide Name (0x83) and Unit
5365 5365 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
5366 5366 * un_vpd_page_mask from them, we decide which way to get the WWN. If
5367 5367 * 0x83 is available, that is the best choice. Our next choice is
5368 5368 * 0x80. If neither are available, we munge the devid from the device
5369 5369 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5370 5370 * to fabricate a devid for non-Sun qualified disks.
5371 5371 */
5372 5372 if (sd_check_vpd_page_support(ssc) == 0) {
5373 5373 /* collect page 80 data if available */
5374 5374 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
5375 5375
5376 5376 mutex_exit(SD_MUTEX(un));
5377 5377 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
5378 5378
5379 5379 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
5380 5380 0x01, 0x80, &inq80_resid);
5381 5381
5382 5382 if (rval != 0) {
5383 5383 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5384 5384 kmem_free(inq80, inq80_len);
5385 5385 inq80 = NULL;
5386 5386 inq80_len = 0;
5387 5387 } else if (ddi_prop_exists(
5388 5388 DDI_DEV_T_NONE, SD_DEVINFO(un),
5389 5389 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
5390 5390 INQUIRY_SERIAL_NO) == 0) {
5391 5391 /*
5392 5392 * If we don't already have a serial number
5393 5393 * property, do quick verify of data returned
5394 5394 * and define property.
5395 5395 */
5396 5396 dlen = inq80_len - inq80_resid;
5397 5397 len = (size_t)inq80[3];
5398 5398 if ((dlen >= 4) && ((len + 4) <= dlen)) {
5399 5399 /*
5400 5400 * Ensure sn termination, skip leading
5401 5401 * blanks, and create property
5402 5402 * 'inquiry-serial-no'.
5403 5403 */
5404 5404 sn = (char *)&inq80[4];
5405 5405 sn[len] = 0;
5406 5406 while (*sn && (*sn == ' '))
5407 5407 sn++;
5408 5408 if (*sn) {
5409 5409 (void) ddi_prop_update_string(
5410 5410 DDI_DEV_T_NONE,
5411 5411 SD_DEVINFO(un),
5412 5412 INQUIRY_SERIAL_NO, sn);
5413 5413 }
5414 5414 }
5415 5415 }
5416 5416 mutex_enter(SD_MUTEX(un));
5417 5417 }
5418 5418
5419 5419 /* collect page 83 data if available */
5420 5420 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
5421 5421 mutex_exit(SD_MUTEX(un));
5422 5422 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
5423 5423
5424 5424 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
5425 5425 0x01, 0x83, &inq83_resid);
5426 5426
5427 5427 if (rval != 0) {
5428 5428 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5429 5429 kmem_free(inq83, inq83_len);
5430 5430 inq83 = NULL;
5431 5431 inq83_len = 0;
5432 5432 }
5433 5433 mutex_enter(SD_MUTEX(un));
5434 5434 }
5435 5435 }
5436 5436
5437 5437 /*
5438 5438 * If transport has already registered a devid for this target
5439 5439 * then that takes precedence over the driver's determination
5440 5440 * of the devid.
5441 5441 *
5442 5442 * NOTE: The reason this check is done here instead of at the beginning
5443 5443 * of the function is to allow the code above to create the
5444 5444 * 'inquiry-serial-no' property.
5445 5445 */
5446 5446 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
5447 5447 ASSERT(un->un_devid);
5448 5448 un->un_f_devid_transport_defined = TRUE;
5449 5449 goto cleanup; /* use devid registered by the transport */
5450 5450 }
5451 5451
5452 5452 /*
5453 5453 * This is the case of antiquated Sun disk drives that have the
5454 5454 * FAB_DEVID property set in the disk_table. These drives
5455 5455 * manage the devid's by storing them in last 2 available sectors
5456 5456 * on the drive and have them fabricated by the ddi layer by calling
5457 5457 * ddi_devid_init and passing the DEVID_FAB flag.
5458 5458 */
5459 5459 if (un->un_f_opt_fab_devid == TRUE) {
5460 5460 /*
5461 5461 * Depending on EINVAL isn't reliable, since a reserved disk
5462 5462 * may result in invalid geometry, so check to make sure a
5463 5463 * reservation conflict did not occur during attach.
5464 5464 */
5465 5465 if ((sd_get_devid(ssc) == EINVAL) &&
5466 5466 (reservation_flag != SD_TARGET_IS_RESERVED)) {
5467 5467 /*
5468 5468 * The devid is invalid AND there is no reservation
5469 5469 * conflict. Fabricate a new devid.
5470 5470 */
5471 5471 (void) sd_create_devid(ssc);
5472 5472 }
5473 5473
5474 5474 /* Register the devid if it exists */
5475 5475 if (un->un_devid != NULL) {
5476 5476 (void) ddi_devid_register(SD_DEVINFO(un),
5477 5477 un->un_devid);
5478 5478 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5479 5479 "sd_register_devid: Devid Fabricated\n");
5480 5480 }
5481 5481 goto cleanup;
5482 5482 }
5483 5483
5484 5484 /* encode best devid possible based on data available */
5485 5485 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5486 5486 (char *)ddi_driver_name(SD_DEVINFO(un)),
5487 5487 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5488 5488 inq80, inq80_len - inq80_resid, inq83, inq83_len -
5489 5489 inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5490 5490
5491 5491 /* devid successfully encoded, register devid */
5492 5492 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5493 5493
5494 5494 } else {
5495 5495 /*
5496 5496 * Unable to encode a devid based on data available.
5497 5497 * This is not a Sun qualified disk. Older Sun disk
5498 5498 * drives that have the SD_FAB_DEVID property
5499 5499 * set in the disk_table and non Sun qualified
5500 5500 * disks are treated in the same manner. These
5501 5501 * drives manage the devid's by storing them in
5502 5502 * last 2 available sectors on the drive and
5503 5503 * have them fabricated by the ddi layer by
5504 5504 * calling ddi_devid_init and passing the
5505 5505 * DEVID_FAB flag.
5506 5506 * Create a fabricate devid only if there's no
5507 5507 * fabricate devid existed.
5508 5508 */
5509 5509 if (sd_get_devid(ssc) == EINVAL) {
5510 5510 (void) sd_create_devid(ssc);
5511 5511 }
5512 5512 un->un_f_opt_fab_devid = TRUE;
5513 5513
5514 5514 /* Register the devid if it exists */
5515 5515 if (un->un_devid != NULL) {
5516 5516 (void) ddi_devid_register(SD_DEVINFO(un),
5517 5517 un->un_devid);
5518 5518 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5519 5519 "sd_register_devid: devid fabricated using "
5520 5520 "ddi framework\n");
5521 5521 }
5522 5522 }
5523 5523
5524 5524 cleanup:
5525 5525 /* clean up resources */
5526 5526 if (inq80 != NULL) {
5527 5527 kmem_free(inq80, inq80_len);
5528 5528 }
5529 5529 if (inq83 != NULL) {
5530 5530 kmem_free(inq83, inq83_len);
5531 5531 }
5532 5532 }
5533 5533
5534 5534
5535 5535
5536 5536 /*
5537 5537 * Function: sd_get_devid
5538 5538 *
5539 5539 * Description: This routine will return 0 if a valid device id has been
5540 5540 * obtained from the target and stored in the soft state. If a
5541 5541 * valid device id has not been previously read and stored, a
5542 5542 * read attempt will be made.
5543 5543 *
5544 5544 * Arguments: un - driver soft state (unit) structure
5545 5545 *
5546 5546 * Return Code: 0 if we successfully get the device id
5547 5547 *
5548 5548 * Context: Kernel Thread
5549 5549 */
5550 5550
5551 5551 static int
5552 5552 sd_get_devid(sd_ssc_t *ssc)
5553 5553 {
5554 5554 struct dk_devid *dkdevid;
5555 5555 ddi_devid_t tmpid;
5556 5556 uint_t *ip;
5557 5557 size_t sz;
5558 5558 diskaddr_t blk;
5559 5559 int status;
5560 5560 int chksum;
5561 5561 int i;
5562 5562 size_t buffer_size;
5563 5563 struct sd_lun *un;
5564 5564
5565 5565 ASSERT(ssc != NULL);
5566 5566 un = ssc->ssc_un;
5567 5567 ASSERT(un != NULL);
5568 5568 ASSERT(mutex_owned(SD_MUTEX(un)));
5569 5569
5570 5570 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
5571 5571 un);
5572 5572
5573 5573 if (un->un_devid != NULL) {
5574 5574 return (0);
5575 5575 }
5576 5576
5577 5577 mutex_exit(SD_MUTEX(un));
5578 5578 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5579 5579 (void *)SD_PATH_DIRECT) != 0) {
5580 5580 mutex_enter(SD_MUTEX(un));
5581 5581 return (EINVAL);
5582 5582 }
5583 5583
5584 5584 /*
5585 5585 * Read and verify device id, stored in the reserved cylinders at the
5586 5586 * end of the disk. Backup label is on the odd sectors of the last
5587 5587 * track of the last cylinder. Device id will be on track of the next
5588 5588 * to last cylinder.
5589 5589 */
5590 5590 mutex_enter(SD_MUTEX(un));
5591 5591 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
5592 5592 mutex_exit(SD_MUTEX(un));
5593 5593 dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
5594 5594 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
5595 5595 SD_PATH_DIRECT);
5596 5596
5597 5597 if (status != 0) {
5598 5598 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5599 5599 goto error;
5600 5600 }
5601 5601
5602 5602 /* Validate the revision */
5603 5603 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
5604 5604 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
5605 5605 status = EINVAL;
5606 5606 goto error;
5607 5607 }
5608 5608
5609 5609 /* Calculate the checksum */
5610 5610 chksum = 0;
5611 5611 ip = (uint_t *)dkdevid;
5612 5612 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5613 5613 i++) {
5614 5614 chksum ^= ip[i];
5615 5615 }
5616 5616
5617 5617 /* Compare the checksums */
5618 5618 if (DKD_GETCHKSUM(dkdevid) != chksum) {
5619 5619 status = EINVAL;
5620 5620 goto error;
5621 5621 }
5622 5622
5623 5623 /* Validate the device id */
5624 5624 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
5625 5625 status = EINVAL;
5626 5626 goto error;
5627 5627 }
5628 5628
5629 5629 /*
5630 5630 * Store the device id in the driver soft state
5631 5631 */
5632 5632 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
5633 5633 tmpid = kmem_alloc(sz, KM_SLEEP);
5634 5634
5635 5635 mutex_enter(SD_MUTEX(un));
5636 5636
5637 5637 un->un_devid = tmpid;
5638 5638 bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
5639 5639
5640 5640 kmem_free(dkdevid, buffer_size);
5641 5641
5642 5642 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);
5643 5643
5644 5644 return (status);
5645 5645 error:
5646 5646 mutex_enter(SD_MUTEX(un));
5647 5647 kmem_free(dkdevid, buffer_size);
5648 5648 return (status);
5649 5649 }
5650 5650
5651 5651
5652 5652 /*
5653 5653 * Function: sd_create_devid
5654 5654 *
5655 5655 * Description: This routine will fabricate the device id and write it
5656 5656 * to the disk.
5657 5657 *
5658 5658 * Arguments: un - driver soft state (unit) structure
5659 5659 *
5660 5660 * Return Code: value of the fabricated device id
5661 5661 *
5662 5662 * Context: Kernel Thread
5663 5663 */
5664 5664
5665 5665 static ddi_devid_t
5666 5666 sd_create_devid(sd_ssc_t *ssc)
5667 5667 {
5668 5668 struct sd_lun *un;
5669 5669
5670 5670 ASSERT(ssc != NULL);
5671 5671 un = ssc->ssc_un;
5672 5672 ASSERT(un != NULL);
5673 5673
5674 5674 /* Fabricate the devid */
5675 5675 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
5676 5676 == DDI_FAILURE) {
5677 5677 return (NULL);
5678 5678 }
5679 5679
5680 5680 /* Write the devid to disk */
5681 5681 if (sd_write_deviceid(ssc) != 0) {
5682 5682 ddi_devid_free(un->un_devid);
5683 5683 un->un_devid = NULL;
5684 5684 }
5685 5685
5686 5686 return (un->un_devid);
5687 5687 }
5688 5688
5689 5689
5690 5690 /*
5691 5691 * Function: sd_write_deviceid
5692 5692 *
5693 5693 * Description: This routine will write the device id to the disk
5694 5694 * reserved sector.
5695 5695 *
5696 5696 * Arguments: un - driver soft state (unit) structure
5697 5697 *
5698 5698 * Return Code: EINVAL
5699 5699 * value returned by sd_send_scsi_cmd
5700 5700 *
5701 5701 * Context: Kernel Thread
5702 5702 */
5703 5703
5704 5704 static int
5705 5705 sd_write_deviceid(sd_ssc_t *ssc)
5706 5706 {
5707 5707 struct dk_devid *dkdevid;
5708 5708 uchar_t *buf;
5709 5709 diskaddr_t blk;
5710 5710 uint_t *ip, chksum;
5711 5711 int status;
5712 5712 int i;
5713 5713 struct sd_lun *un;
5714 5714
5715 5715 ASSERT(ssc != NULL);
5716 5716 un = ssc->ssc_un;
5717 5717 ASSERT(un != NULL);
5718 5718 ASSERT(mutex_owned(SD_MUTEX(un)));
5719 5719
5720 5720 mutex_exit(SD_MUTEX(un));
5721 5721 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5722 5722 (void *)SD_PATH_DIRECT) != 0) {
5723 5723 mutex_enter(SD_MUTEX(un));
5724 5724 return (-1);
5725 5725 }
5726 5726
5727 5727
5728 5728 /* Allocate the buffer */
5729 5729 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5730 5730 dkdevid = (struct dk_devid *)buf;
5731 5731
5732 5732 /* Fill in the revision */
5733 5733 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5734 5734 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5735 5735
5736 5736 /* Copy in the device id */
5737 5737 mutex_enter(SD_MUTEX(un));
5738 5738 bcopy(un->un_devid, &dkdevid->dkd_devid,
5739 5739 ddi_devid_sizeof(un->un_devid));
5740 5740 mutex_exit(SD_MUTEX(un));
5741 5741
5742 5742 /* Calculate the checksum */
5743 5743 chksum = 0;
5744 5744 ip = (uint_t *)dkdevid;
5745 5745 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5746 5746 i++) {
5747 5747 chksum ^= ip[i];
5748 5748 }
5749 5749
5750 5750 /* Fill-in checksum */
5751 5751 DKD_FORMCHKSUM(chksum, dkdevid);
5752 5752
5753 5753 /* Write the reserved sector */
5754 5754 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
5755 5755 SD_PATH_DIRECT);
5756 5756 if (status != 0)
5757 5757 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5758 5758
5759 5759 kmem_free(buf, un->un_sys_blocksize);
5760 5760
5761 5761 mutex_enter(SD_MUTEX(un));
5762 5762 return (status);
5763 5763 }
5764 5764
5765 5765
5766 5766 /*
5767 5767 * Function: sd_check_vpd_page_support
5768 5768 *
5769 5769 * Description: This routine sends an inquiry command with the EVPD bit set and
5770 5770 * a page code of 0x00 to the device. It is used to determine which
5771 5771 * vital product pages are available to find the devid. We are
5772 5772 * looking for pages 0x83 0x80 or 0xB1. If we return a negative 1,
5773 5773 * the device does not support that command.
5774 5774 *
5775 5775 * Arguments: un - driver soft state (unit) structure
5776 5776 *
5777 5777 * Return Code: 0 - success
5778 5778 * 1 - check condition
5779 5779 *
5780 5780 * Context: This routine can sleep.
5781 5781 */
5782 5782
5783 5783 static int
5784 5784 sd_check_vpd_page_support(sd_ssc_t *ssc)
5785 5785 {
5786 5786 uchar_t *page_list = NULL;
5787 5787 uchar_t page_length = 0xff; /* Use max possible length */
5788 5788 uchar_t evpd = 0x01; /* Set the EVPD bit */
5789 5789 uchar_t page_code = 0x00; /* Supported VPD Pages */
5790 5790 int rval = 0;
5791 5791 int counter;
5792 5792 struct sd_lun *un;
5793 5793
5794 5794 ASSERT(ssc != NULL);
5795 5795 un = ssc->ssc_un;
5796 5796 ASSERT(un != NULL);
5797 5797 ASSERT(mutex_owned(SD_MUTEX(un)));
5798 5798
5799 5799 mutex_exit(SD_MUTEX(un));
5800 5800
5801 5801 /*
5802 5802 * We'll set the page length to the maximum to save figuring it out
5803 5803 * with an additional call.
5804 5804 */
5805 5805 page_list = kmem_zalloc(page_length, KM_SLEEP);
5806 5806
5807 5807 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
5808 5808 page_code, NULL);
5809 5809
5810 5810 if (rval != 0)
5811 5811 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5812 5812
5813 5813 mutex_enter(SD_MUTEX(un));
5814 5814
5815 5815 /*
5816 5816 * Now we must validate that the device accepted the command, as some
5817 5817 * drives do not support it. If the drive does support it, we will
5818 5818 * return 0, and the supported pages will be in un_vpd_page_mask. If
5819 5819 * not, we return -1.
5820 5820 */
5821 5821 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
5822 5822 /* Loop to find one of the 2 pages we need */
5823 5823 counter = 4; /* Supported pages start at byte 4, with 0x00 */
5824 5824
5825 5825 /*
5826 5826 * Pages are returned in ascending order, and 0x83 is what we
5827 5827 * are hoping for.
5828 5828 */
5829 5829 while ((page_list[counter] <= 0xB1) &&
5830 5830 (counter <= (page_list[VPD_PAGE_LENGTH] +
5831 5831 VPD_HEAD_OFFSET))) {
5832 5832 /*
5833 5833 * Add 3 because page_list[3] is the number of
5834 5834 * pages minus 3
5835 5835 */
5836 5836
5837 5837 switch (page_list[counter]) {
5838 5838 case 0x00:
5839 5839 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
5840 5840 break;
5841 5841 case 0x80:
5842 5842 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
5843 5843 break;
5844 5844 case 0x81:
5845 5845 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
5846 5846 break;
5847 5847 case 0x82:
5848 5848 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
5849 5849 break;
5850 5850 case 0x83:
5851 5851 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
5852 5852 break;
5853 5853 case 0x86:
5854 5854 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
5855 5855 break;
5856 5856 case 0xB1:
5857 5857 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG;
5858 5858 break;
5859 5859 }
5860 5860 counter++;
5861 5861 }
5862 5862
5863 5863 } else {
5864 5864 rval = -1;
5865 5865
5866 5866 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5867 5867 "sd_check_vpd_page_support: This drive does not implement "
5868 5868 "VPD pages.\n");
5869 5869 }
5870 5870
5871 5871 kmem_free(page_list, page_length);
5872 5872
5873 5873 return (rval);
5874 5874 }
5875 5875
5876 5876
/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Arguments: ssc - sd ssc structure (wraps the unit's soft state)
 *	      devi - the device's dev_info node
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
{
	uint_t	log_page_size;
	uchar_t	*log_page_data;
	int	rval = 0;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary. Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines. Create the pm_components
	 * property as a string array property.
	 * If un_f_pm_supported is TRUE, that means the disk
	 * attached HBA has set the "pm-capable" property and
	 * the value of this property is bigger than 0.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * not all devices have a motor, try it first.
		 * some devices may return ILLEGAL REQUEST, some
		 * will hang
		 * The following START_STOP_UNIT is used to check if target
		 * device has a motor.
		 */
		un->un_f_start_stop_supported = TRUE;

		/*
		 * Prefer the power-condition form of START STOP UNIT; if
		 * the device rejects it, fall back to the classic
		 * start/stop form.
		 */
		if (un->un_f_power_condition_supported) {
			rval = sd_send_scsi_START_STOP_UNIT(ssc,
			    SD_POWER_CONDITION, SD_TARGET_ACTIVE,
			    SD_PATH_DIRECT);
			if (rval != 0) {
				un->un_f_power_condition_supported = FALSE;
			}
		}
		if (!un->un_f_power_condition_supported) {
			rval = sd_send_scsi_START_STOP_UNIT(ssc,
			    SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
		}
		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * create pm properties anyways otherwise the parent can't
		 * go to sleep
		 */
		un->un_f_pm_is_enabled = TRUE;
		(void) sd_create_pm_components(devi, un);

		/*
		 * If it claims that log sense is supported, check it out.
		 */
		if (un->un_f_log_sense_supported) {
			rval = sd_log_page_supported(ssc,
			    START_STOP_CYCLE_PAGE);
			if (rval == 1) {
				/* Page found, use it. */
				un->un_start_stop_cycle_page =
				    START_STOP_CYCLE_PAGE;
			} else {
				/*
				 * Page not found or log sense is not
				 * supported.
				 * Notice we do not check the old style
				 * START_STOP_CYCLE_VU_PAGE because this
				 * code path does not apply to old disks.
				 */
				un->un_f_log_sense_supported = FALSE;
				un->un_f_pm_log_sense_smart = FALSE;
			}
		}

		return;
	}

	/*
	 * For the disk whose attached HBA has not set the "pm-capable"
	 * property, check if it supports the power management.
	 */
	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	/* -1: error, 0: page not supported, 1: page supported (see below) */
	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is set to be false (0),
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
		 */
		if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the Log sense for Page( Start/stop cycle counter page)
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}
6079 6079
6080 6080
6081 6081 /*
6082 6082 * Function: sd_create_pm_components
6083 6083 *
6084 6084 * Description: Initialize PM property.
6085 6085 *
6086 6086 * Context: Kernel thread context
6087 6087 */
6088 6088
6089 6089 static void
6090 6090 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
6091 6091 {
6092 6092 ASSERT(!mutex_owned(SD_MUTEX(un)));
6093 6093
6094 6094 if (un->un_f_power_condition_supported) {
6095 6095 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6096 6096 "pm-components", sd_pwr_pc.pm_comp, 5)
6097 6097 != DDI_PROP_SUCCESS) {
6098 6098 un->un_power_level = SD_SPINDLE_ACTIVE;
6099 6099 un->un_f_pm_is_enabled = FALSE;
6100 6100 return;
6101 6101 }
6102 6102 } else {
6103 6103 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6104 6104 "pm-components", sd_pwr_ss.pm_comp, 3)
6105 6105 != DDI_PROP_SUCCESS) {
6106 6106 un->un_power_level = SD_SPINDLE_ON;
6107 6107 un->un_f_pm_is_enabled = FALSE;
6108 6108 return;
6109 6109 }
6110 6110 }
6111 6111 /*
6112 6112 * When components are initially created they are idle,
6113 6113 * power up any non-removables.
6114 6114 * Note: the return value of pm_raise_power can't be used
6115 6115 * for determining if PM should be enabled for this device.
6116 6116 * Even if you check the return values and remove this
6117 6117 * property created above, the PM framework will not honor the
6118 6118 * change after the first call to pm_raise_power. Hence,
6119 6119 * removal of that property does not help if pm_raise_power
6120 6120 * fails. In the case of removable media, the start/stop
6121 6121 * will fail if the media is not present.
6122 6122 */
6123 6123 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
6124 6124 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) {
6125 6125 mutex_enter(SD_MUTEX(un));
6126 6126 un->un_power_level = SD_PM_STATE_ACTIVE(un);
6127 6127 mutex_enter(&un->un_pm_mutex);
6128 6128 /* Set to on and not busy. */
6129 6129 un->un_pm_count = 0;
6130 6130 } else {
6131 6131 mutex_enter(SD_MUTEX(un));
6132 6132 un->un_power_level = SD_PM_STATE_STOPPED(un);
6133 6133 mutex_enter(&un->un_pm_mutex);
6134 6134 /* Set to off. */
6135 6135 un->un_pm_count = -1;
6136 6136 }
6137 6137 mutex_exit(&un->un_pm_mutex);
6138 6138 mutex_exit(SD_MUTEX(un));
6139 6139 }
6140 6140
6141 6141
/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate its suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Arguments: devi - the device's dev_info node
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended. */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}


	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because of
	 * the new state we set above.
	 *
	 * Wait till current operation has completed. If we are in the resource
	 * wait state (with an intr outstanding) then we need to wait till the
	 * intr completes and starts the next cmd. We want to wait for
	 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
	 */
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/*
		 * Fail if commands do not finish in the specified time.
		 */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above. Everything
			 * must go back to its original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active.
	 * For each timeout below, the id is copied and the field cleared
	 * under SD_MUTEX, then the mutex is dropped across untimeout(9F) —
	 * presumably because the pending callback may itself need SD_MUTEX
	 * to complete.
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* The PM timeout id is protected by un_pm_mutex, not SD_MUTEX. */
	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_rmw_msg_timeid != NULL) {
		timeout_id_t temp_id = un->un_rmw_msg_timeid;
		un->un_rmw_msg_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));

		/*
		 * The retry command will not be resubmitted by the cancelled
		 * timeout, so put it back at the head of the wait queue to
		 * be reissued after resume.
		 */
		if (un->un_retry_bp != NULL) {
			un->un_retry_bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = un->un_retry_bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = un->un_retry_bp;
			}
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}
6339 6339
6340 6340
/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Arguments: devi - the device's dev_info node
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved to give the
	 * the right state in un_last_state
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power
	 * otherwise the system can get hung in biowait. The scenario where
	 * this'll happen is under cpr suspend. Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle. If
	 * writing the system state then fails, example if the partition is
	 * too small, then cpr attempts a resume. If throttle isn't restored
	 * from the saved value until after calling pm_raise_power then
	 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
	 * in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare as the only command done in power
	 * entry point is START command when you transition from 0->1 or
	 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
	 * which suspend was done. Ignore the return value as the resume should
	 * not be failed. In the case of removable media the media need not be
	 * inserted and hence there is a chance that raise power will fail with
	 * media not present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0,
		    SD_PM_STATE_ACTIVE(un));
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device commands in queue will have to wait
	 * until new commands come in, which may take awhile. Also, we
	 * specifically don't check un_ncmds_in_transport because we know that
	 * there really are no commands in progress after the unit was
	 * suspended and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for awhile. Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}
6445 6445
6446 6446
6447 6447 /*
6448 6448 * Function: sd_pm_state_change
6449 6449 *
6450 6450 * Description: Change the driver power state.
6451 6451 * Someone else is required to actually change the driver
6452 6452 * power level.
6453 6453 *
6454 6454 * Arguments: un - driver soft state (unit) structure
6455 6455 * level - the power level that is changed to
6456 6456 * flag - to decide how to change the power state
6457 6457 *
6458 6458 * Return Code: DDI_SUCCESS
6459 6459 *
6460 6460 * Context: Kernel thread context
6461 6461 */
6462 6462 static int
6463 6463 sd_pm_state_change(struct sd_lun *un, int level, int flag)
6464 6464 {
6465 6465 ASSERT(un != NULL);
6466 6466 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n");
6467 6467
6468 6468 ASSERT(!mutex_owned(SD_MUTEX(un)));
6469 6469 mutex_enter(SD_MUTEX(un));
6470 6470
6471 6471 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) {
6472 6472 un->un_power_level = level;
6473 6473 ASSERT(!mutex_owned(&un->un_pm_mutex));
6474 6474 mutex_enter(&un->un_pm_mutex);
6475 6475 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
6476 6476 un->un_pm_count++;
6477 6477 ASSERT(un->un_pm_count == 0);
6478 6478 }
6479 6479 mutex_exit(&un->un_pm_mutex);
6480 6480 } else {
6481 6481 /*
6482 6482 * Exit if power management is not enabled for this device,
6483 6483 * or if the device is being used by HA.
6484 6484 */
6485 6485 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
6486 6486 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
6487 6487 mutex_exit(SD_MUTEX(un));
6488 6488 SD_TRACE(SD_LOG_POWER, un,
6489 6489 "sd_pm_state_change: exiting\n");
6490 6490 return (DDI_FAILURE);
6491 6491 }
6492 6492
6493 6493 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: "
6494 6494 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver);
6495 6495
6496 6496 /*
6497 6497 * See if the device is not busy, ie.:
6498 6498 * - we have no commands in the driver for this device
6499 6499 * - not waiting for resources
6500 6500 */
6501 6501 if ((un->un_ncmds_in_driver == 0) &&
6502 6502 (un->un_state != SD_STATE_RWAIT)) {
6503 6503 /*
6504 6504 * The device is not busy, so it is OK to go to low
6505 6505 * power state. Indicate low power, but rely on someone
6506 6506 * else to actually change it.
6507 6507 */
6508 6508 mutex_enter(&un->un_pm_mutex);
6509 6509 un->un_pm_count = -1;
6510 6510 mutex_exit(&un->un_pm_mutex);
6511 6511 un->un_power_level = level;
6512 6512 }
6513 6513 }
6514 6514
6515 6515 mutex_exit(SD_MUTEX(un));
6516 6516
6517 6517 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n");
6518 6518
6519 6519 return (DDI_SUCCESS);
6520 6520 }
6521 6521
6522 6522
6523 6523 /*
6524 6524 * Function: sd_pm_idletimeout_handler
6525 6525 *
6526 6526 * Description: A timer routine that's active only while a device is busy.
6527 6527 * The purpose is to extend slightly the pm framework's busy
6528 6528 * view of the device to prevent busy/idle thrashing for
6529 6529 * back-to-back commands. Do this by comparing the current time
6530 6530 * to the time at which the last command completed and when the
6531 6531 * difference is greater than sd_pm_idletime, call
6532 6532 * pm_idle_component. In addition to indicating idle to the pm
6533 6533 * framework, update the chain type to again use the internal pm
↓ open down ↓ |
6495 lines elided |
↑ open up ↑ |
6534 6534 * layers of the driver.
6535 6535 *
6536 6536 * Arguments: arg - driver soft state (unit) structure
6537 6537 *
6538 6538 * Context: Executes in a timeout(9F) thread context
6539 6539 */
6540 6540
6541 6541 static void
6542 6542 sd_pm_idletimeout_handler(void *arg)
6543 6543 {
6544 + const hrtime_t idletime = sd_pm_idletime * NANOSEC;
6544 6545 struct sd_lun *un = arg;
6545 6546
6546 - time_t now;
6547 -
6548 6547 mutex_enter(&sd_detach_mutex);
6549 6548 if (un->un_detach_count != 0) {
6550 6549 /* Abort if the instance is detaching */
6551 6550 mutex_exit(&sd_detach_mutex);
6552 6551 return;
6553 6552 }
6554 6553 mutex_exit(&sd_detach_mutex);
6555 6554
6556 - now = ddi_get_time();
6557 6555 /*
6558 6556 * Grab both mutexes, in the proper order, since we're accessing
6559 6557 * both PM and softstate variables.
6560 6558 */
6561 6559 mutex_enter(SD_MUTEX(un));
6562 6560 mutex_enter(&un->un_pm_mutex);
6563 - if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
6561 + if (((gethrtime() - un->un_pm_idle_time) > idletime) &&
6564 6562 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
6565 6563 /*
6566 6564 * Update the chain types.
6567 6565 * This takes affect on the next new command received.
6568 6566 */
6569 6567 if (un->un_f_non_devbsize_supported) {
6570 6568 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6571 6569 } else {
6572 6570 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6573 6571 }
6574 6572 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6575 6573
6576 6574 SD_TRACE(SD_LOG_IO_PM, un,
6577 6575 "sd_pm_idletimeout_handler: idling device\n");
6578 6576 (void) pm_idle_component(SD_DEVINFO(un), 0);
6579 6577 un->un_pm_idle_timeid = NULL;
6580 6578 } else {
6581 6579 un->un_pm_idle_timeid =
6582 6580 timeout(sd_pm_idletimeout_handler, un,
6583 6581 (drv_usectohz((clock_t)300000))); /* 300 ms. */
6584 6582 }
6585 6583 mutex_exit(&un->un_pm_mutex);
6586 6584 mutex_exit(SD_MUTEX(un));
6587 6585 }
6588 6586
6589 6587
6590 6588 /*
6591 6589 * Function: sd_pm_timeout_handler
6592 6590 *
6593 6591 * Description: Callback to tell framework we are idle.
6594 6592 *
6595 6593 * Context: timeout(9f) thread context.
6596 6594 */
6597 6595
6598 6596 static void
6599 6597 sd_pm_timeout_handler(void *arg)
6600 6598 {
6601 6599 struct sd_lun *un = arg;
6602 6600
6603 6601 (void) pm_idle_component(SD_DEVINFO(un), 0);
6604 6602 mutex_enter(&un->un_pm_mutex);
6605 6603 un->un_pm_timeid = NULL;
6606 6604 mutex_exit(&un->un_pm_mutex);
6607 6605 }
6608 6606
6609 6607
6610 6608 /*
6611 6609 * Function: sdpower
6612 6610 *
6613 6611 * Description: PM entry point.
6614 6612 *
6615 6613 * Return Code: DDI_SUCCESS
6616 6614 * DDI_FAILURE
6617 6615 *
6618 6616 * Context: Kernel thread context
6619 6617 */
6620 6618
6621 6619 static int
6622 6620 sdpower(dev_info_t *devi, int component, int level)
6623 6621 {
6624 6622 struct sd_lun *un;
6625 6623 int instance;
6626 6624 int rval = DDI_SUCCESS;
6627 6625 uint_t i, log_page_size, maxcycles, ncycles;
6628 6626 uchar_t *log_page_data;
6629 6627 int log_sense_page;
6630 6628 int medium_present;
6631 6629 time_t intvlp;
6632 6630 struct pm_trans_data sd_pm_tran_data;
6633 6631 uchar_t save_state;
6634 6632 int sval;
6635 6633 uchar_t state_before_pm;
6636 6634 int got_semaphore_here;
6637 6635 sd_ssc_t *ssc;
6638 6636 int last_power_level;
6639 6637
6640 6638 instance = ddi_get_instance(devi);
6641 6639
6642 6640 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
6643 6641 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
6644 6642 return (DDI_FAILURE);
6645 6643 }
6646 6644
6647 6645 ssc = sd_ssc_init(un);
6648 6646
6649 6647 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
6650 6648
6651 6649 /*
6652 6650 * Must synchronize power down with close.
6653 6651 * Attempt to decrement/acquire the open/close semaphore,
6654 6652 * but do NOT wait on it. If it's not greater than zero,
6655 6653 * ie. it can't be decremented without waiting, then
6656 6654 * someone else, either open or close, already has it
6657 6655 * and the try returns 0. Use that knowledge here to determine
6658 6656 * if it's OK to change the device power level.
6659 6657 * Also, only increment it on exit if it was decremented, ie. gotten,
6660 6658 * here.
6661 6659 */
6662 6660 got_semaphore_here = sema_tryp(&un->un_semoclose);
6663 6661
6664 6662 mutex_enter(SD_MUTEX(un));
6665 6663
6666 6664 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6667 6665 un->un_ncmds_in_driver);
6668 6666
6669 6667 /*
6670 6668 * If un_ncmds_in_driver is non-zero it indicates commands are
6671 6669 * already being processed in the driver, or if the semaphore was
6672 6670 * not gotten here it indicates an open or close is being processed.
6673 6671 * At the same time somebody is requesting to go to a lower power
6674 6672 * that can't perform I/O, which can't happen, therefore we need to
6675 6673 * return failure.
6676 6674 */
6677 6675 if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
6678 6676 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6679 6677 mutex_exit(SD_MUTEX(un));
6680 6678
6681 6679 if (got_semaphore_here != 0) {
6682 6680 sema_v(&un->un_semoclose);
6683 6681 }
6684 6682 SD_TRACE(SD_LOG_IO_PM, un,
6685 6683 "sdpower: exit, device has queued cmds.\n");
6686 6684
6687 6685 goto sdpower_failed;
6688 6686 }
6689 6687
6690 6688 /*
6691 6689 * if it is OFFLINE that means the disk is completely dead
6692 6690 * in our case we have to put the disk in on or off by sending commands
6693 6691 * Of course that will fail anyway so return back here.
6694 6692 *
6695 6693 * Power changes to a device that's OFFLINE or SUSPENDED
6696 6694 * are not allowed.
6697 6695 */
6698 6696 if ((un->un_state == SD_STATE_OFFLINE) ||
6699 6697 (un->un_state == SD_STATE_SUSPENDED)) {
6700 6698 mutex_exit(SD_MUTEX(un));
6701 6699
6702 6700 if (got_semaphore_here != 0) {
6703 6701 sema_v(&un->un_semoclose);
6704 6702 }
6705 6703 SD_TRACE(SD_LOG_IO_PM, un,
6706 6704 "sdpower: exit, device is off-line.\n");
6707 6705
6708 6706 goto sdpower_failed;
6709 6707 }
6710 6708
6711 6709 /*
6712 6710 * Change the device's state to indicate it's power level
6713 6711 * is being changed. Do this to prevent a power off in the
6714 6712 * middle of commands, which is especially bad on devices
6715 6713 * that are really powered off instead of just spun down.
6716 6714 */
6717 6715 state_before_pm = un->un_state;
6718 6716 un->un_state = SD_STATE_PM_CHANGING;
6719 6717
6720 6718 mutex_exit(SD_MUTEX(un));
6721 6719
6722 6720 /*
6723 6721 * If log sense command is not supported, bypass the
6724 6722 * following checking, otherwise, check the log sense
6725 6723 * information for this device.
6726 6724 */
6727 6725 if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
6728 6726 un->un_f_log_sense_supported) {
6729 6727 /*
6730 6728 		 * Get the log sense information to understand whether the
6731 6729 		 * powercycle counts have gone beyond the threshold.
6732 6730 		 */
6733 6731 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6734 6732 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6735 6733
6736 6734 mutex_enter(SD_MUTEX(un));
6737 6735 log_sense_page = un->un_start_stop_cycle_page;
6738 6736 mutex_exit(SD_MUTEX(un));
6739 6737
6740 6738 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6741 6739 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6742 6740
6743 6741 if (rval != 0) {
6744 6742 if (rval == EIO)
6745 6743 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6746 6744 else
6747 6745 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6748 6746 }
6749 6747
6750 6748 #ifdef SDDEBUG
6751 6749 if (sd_force_pm_supported) {
6752 6750 /* Force a successful result */
6753 6751 rval = 0;
6754 6752 }
6755 6753 #endif
6756 6754 if (rval != 0) {
6757 6755 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6758 6756 "Log Sense Failed\n");
6759 6757
6760 6758 kmem_free(log_page_data, log_page_size);
6761 6759 /* Cannot support power management on those drives */
6762 6760
6763 6761 if (got_semaphore_here != 0) {
6764 6762 sema_v(&un->un_semoclose);
6765 6763 }
6766 6764 /*
6767 6765 * On exit put the state back to it's original value
6768 6766 * and broadcast to anyone waiting for the power
6769 6767 * change completion.
6770 6768 */
6771 6769 mutex_enter(SD_MUTEX(un));
6772 6770 un->un_state = state_before_pm;
6773 6771 cv_broadcast(&un->un_suspend_cv);
6774 6772 mutex_exit(SD_MUTEX(un));
6775 6773 SD_TRACE(SD_LOG_IO_PM, un,
6776 6774 "sdpower: exit, Log Sense Failed.\n");
6777 6775
6778 6776 goto sdpower_failed;
6779 6777 }
6780 6778
6781 6779 /*
6782 6780 * From the page data - Convert the essential information to
6783 6781 * pm_trans_data
6784 6782 */
6785 6783 maxcycles =
6786 6784 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6787 6785 (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6788 6786
6789 6787 ncycles =
6790 6788 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6791 6789 (log_page_data[0x26] << 8) | log_page_data[0x27];
6792 6790
6793 6791 if (un->un_f_pm_log_sense_smart) {
6794 6792 sd_pm_tran_data.un.smart_count.allowed = maxcycles;
6795 6793 sd_pm_tran_data.un.smart_count.consumed = ncycles;
6796 6794 sd_pm_tran_data.un.smart_count.flag = 0;
6797 6795 sd_pm_tran_data.format = DC_SMART_FORMAT;
6798 6796 } else {
6799 6797 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6800 6798 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6801 6799 for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6802 6800 sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6803 6801 log_page_data[8+i];
6804 6802 }
6805 6803 sd_pm_tran_data.un.scsi_cycles.flag = 0;
6806 6804 sd_pm_tran_data.format = DC_SCSI_FORMAT;
6807 6805 }
6808 6806
6809 6807 kmem_free(log_page_data, log_page_size);
6810 6808
6811 6809 /*
6812 6810 * Call pm_trans_check routine to get the Ok from
6813 6811 * the global policy
6814 6812 */
6815 6813 rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6816 6814 #ifdef SDDEBUG
6817 6815 if (sd_force_pm_supported) {
6818 6816 /* Force a successful result */
6819 6817 rval = 1;
6820 6818 }
6821 6819 #endif
6822 6820 switch (rval) {
6823 6821 case 0:
6824 6822 /*
6825 6823 * Not Ok to Power cycle or error in parameters passed
6826 6824 * Would have given the advised time to consider power
6827 6825 * cycle. Based on the new intvlp parameter we are
6828 6826 * supposed to pretend we are busy so that pm framework
6829 6827 * will never call our power entry point. Because of
6830 6828 * that install a timeout handler and wait for the
6831 6829 * recommended time to elapse so that power management
6832 6830 * can be effective again.
6833 6831 *
6834 6832 * To effect this behavior, call pm_busy_component to
6835 6833 * indicate to the framework this device is busy.
6836 6834 * By not adjusting un_pm_count the rest of PM in
6837 6835 * the driver will function normally, and independent
6838 6836 * of this but because the framework is told the device
6839 6837 * is busy it won't attempt powering down until it gets
6840 6838 * a matching idle. The timeout handler sends this.
6841 6839 * Note: sd_pm_entry can't be called here to do this
6842 6840 * because sdpower may have been called as a result
6843 6841 * of a call to pm_raise_power from within sd_pm_entry.
6844 6842 *
6845 6843 * If a timeout handler is already active then
6846 6844 * don't install another.
6847 6845 */
6848 6846 mutex_enter(&un->un_pm_mutex);
6849 6847 if (un->un_pm_timeid == NULL) {
6850 6848 un->un_pm_timeid =
6851 6849 timeout(sd_pm_timeout_handler,
6852 6850 un, intvlp * drv_usectohz(1000000));
6853 6851 mutex_exit(&un->un_pm_mutex);
6854 6852 (void) pm_busy_component(SD_DEVINFO(un), 0);
6855 6853 } else {
6856 6854 mutex_exit(&un->un_pm_mutex);
6857 6855 }
6858 6856 if (got_semaphore_here != 0) {
6859 6857 sema_v(&un->un_semoclose);
6860 6858 }
6861 6859 /*
6862 6860 * On exit put the state back to it's original value
6863 6861 * and broadcast to anyone waiting for the power
6864 6862 * change completion.
6865 6863 */
6866 6864 mutex_enter(SD_MUTEX(un));
6867 6865 un->un_state = state_before_pm;
6868 6866 cv_broadcast(&un->un_suspend_cv);
6869 6867 mutex_exit(SD_MUTEX(un));
6870 6868
6871 6869 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6872 6870 "trans check Failed, not ok to power cycle.\n");
6873 6871
6874 6872 goto sdpower_failed;
6875 6873 case -1:
6876 6874 if (got_semaphore_here != 0) {
6877 6875 sema_v(&un->un_semoclose);
6878 6876 }
6879 6877 /*
6880 6878 * On exit put the state back to it's original value
6881 6879 * and broadcast to anyone waiting for the power
6882 6880 * change completion.
6883 6881 */
6884 6882 mutex_enter(SD_MUTEX(un));
6885 6883 un->un_state = state_before_pm;
6886 6884 cv_broadcast(&un->un_suspend_cv);
6887 6885 mutex_exit(SD_MUTEX(un));
6888 6886 SD_TRACE(SD_LOG_IO_PM, un,
6889 6887 "sdpower: exit, trans check command Failed.\n");
6890 6888
6891 6889 goto sdpower_failed;
6892 6890 }
6893 6891 }
6894 6892
6895 6893 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6896 6894 /*
6897 6895 * Save the last state... if the STOP FAILS we need it
6898 6896 * for restoring
6899 6897 */
6900 6898 mutex_enter(SD_MUTEX(un));
6901 6899 save_state = un->un_last_state;
6902 6900 last_power_level = un->un_power_level;
6903 6901 /*
6904 6902 * There must not be any cmds. getting processed
6905 6903 * in the driver when we get here. Power to the
6906 6904 * device is potentially going off.
6907 6905 */
6908 6906 ASSERT(un->un_ncmds_in_driver == 0);
6909 6907 mutex_exit(SD_MUTEX(un));
6910 6908
6911 6909 /*
6912 6910 * For now PM suspend the device completely before spindle is
6913 6911 * turned off
6914 6912 */
6915 6913 if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
6916 6914 == DDI_FAILURE) {
6917 6915 if (got_semaphore_here != 0) {
6918 6916 sema_v(&un->un_semoclose);
6919 6917 }
6920 6918 /*
6921 6919 * On exit put the state back to it's original value
6922 6920 * and broadcast to anyone waiting for the power
6923 6921 * change completion.
6924 6922 */
6925 6923 mutex_enter(SD_MUTEX(un));
6926 6924 un->un_state = state_before_pm;
6927 6925 un->un_power_level = last_power_level;
6928 6926 cv_broadcast(&un->un_suspend_cv);
6929 6927 mutex_exit(SD_MUTEX(un));
6930 6928 SD_TRACE(SD_LOG_IO_PM, un,
6931 6929 "sdpower: exit, PM suspend Failed.\n");
6932 6930
6933 6931 goto sdpower_failed;
6934 6932 }
6935 6933 }
6936 6934
6937 6935 /*
6938 6936 	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6939 6937 	 * close, or strategy. Dump no longer uses this routine; it uses its
6940 6938 	 * own code so it can be done in polled mode.
6941 6939 */
6942 6940
6943 6941 medium_present = TRUE;
6944 6942
6945 6943 /*
6946 6944 * When powering up, issue a TUR in case the device is at unit
6947 6945 * attention. Don't do retries. Bypass the PM layer, otherwise
6948 6946 * a deadlock on un_pm_busy_cv will occur.
6949 6947 */
6950 6948 if (SD_PM_IS_IO_CAPABLE(un, level)) {
6951 6949 sval = sd_send_scsi_TEST_UNIT_READY(ssc,
6952 6950 SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6953 6951 if (sval != 0)
6954 6952 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6955 6953 }
6956 6954
6957 6955 if (un->un_f_power_condition_supported) {
6958 6956 char *pm_condition_name[] = {"STOPPED", "STANDBY",
6959 6957 "IDLE", "ACTIVE"};
6960 6958 SD_TRACE(SD_LOG_IO_PM, un,
6961 6959 "sdpower: sending \'%s\' power condition",
6962 6960 pm_condition_name[level]);
6963 6961 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
6964 6962 sd_pl2pc[level], SD_PATH_DIRECT);
6965 6963 } else {
6966 6964 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6967 6965 ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6968 6966 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
6969 6967 ((level == SD_SPINDLE_ON) ? SD_TARGET_START :
6970 6968 SD_TARGET_STOP), SD_PATH_DIRECT);
6971 6969 }
6972 6970 if (sval != 0) {
6973 6971 if (sval == EIO)
6974 6972 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6975 6973 else
6976 6974 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6977 6975 }
6978 6976
6979 6977 /* Command failed, check for media present. */
6980 6978 if ((sval == ENXIO) && un->un_f_has_removable_media) {
6981 6979 medium_present = FALSE;
6982 6980 }
6983 6981
6984 6982 /*
6985 6983 * The conditions of interest here are:
6986 6984 * if a spindle off with media present fails,
6987 6985 * then restore the state and return an error.
6988 6986 * else if a spindle on fails,
6989 6987 * then return an error (there's no state to restore).
6990 6988 * In all other cases we setup for the new state
6991 6989 * and return success.
6992 6990 */
6993 6991 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6994 6992 if ((medium_present == TRUE) && (sval != 0)) {
6995 6993 /* The stop command from above failed */
6996 6994 rval = DDI_FAILURE;
6997 6995 /*
6998 6996 * The stop command failed, and we have media
6999 6997 * present. Put the level back by calling the
7000 6998 * sd_pm_resume() and set the state back to
7001 6999 * it's previous value.
7002 7000 */
7003 7001 (void) sd_pm_state_change(un, last_power_level,
7004 7002 SD_PM_STATE_ROLLBACK);
7005 7003 mutex_enter(SD_MUTEX(un));
7006 7004 un->un_last_state = save_state;
7007 7005 mutex_exit(SD_MUTEX(un));
7008 7006 } else if (un->un_f_monitor_media_state) {
7009 7007 /*
7010 7008 * The stop command from above succeeded.
7011 7009 * Terminate watch thread in case of removable media
7012 7010 * devices going into low power state. This is as per
7013 7011 * the requirements of pm framework, otherwise commands
7014 7012 * will be generated for the device (through watch
7015 7013 * thread), even when the device is in low power state.
7016 7014 */
7017 7015 mutex_enter(SD_MUTEX(un));
7018 7016 un->un_f_watcht_stopped = FALSE;
7019 7017 if (un->un_swr_token != NULL) {
7020 7018 opaque_t temp_token = un->un_swr_token;
7021 7019 un->un_f_watcht_stopped = TRUE;
7022 7020 un->un_swr_token = NULL;
7023 7021 mutex_exit(SD_MUTEX(un));
7024 7022 (void) scsi_watch_request_terminate(temp_token,
7025 7023 SCSI_WATCH_TERMINATE_ALL_WAIT);
7026 7024 } else {
7027 7025 mutex_exit(SD_MUTEX(un));
7028 7026 }
7029 7027 }
7030 7028 } else {
7031 7029 /*
7032 7030 * The level requested is I/O capable.
7033 7031 * Legacy behavior: return success on a failed spinup
7034 7032 * if there is no media in the drive.
7035 7033 * Do this by looking at medium_present here.
7036 7034 */
7037 7035 if ((sval != 0) && medium_present) {
7038 7036 /* The start command from above failed */
7039 7037 rval = DDI_FAILURE;
7040 7038 } else {
7041 7039 /*
7042 7040 * The start command from above succeeded
7043 7041 * PM resume the devices now that we have
7044 7042 * started the disks
7045 7043 */
7046 7044 (void) sd_pm_state_change(un, level,
7047 7045 SD_PM_STATE_CHANGE);
7048 7046
7049 7047 /*
7050 7048 * Resume the watch thread since it was suspended
7051 7049 * when the device went into low power mode.
7052 7050 */
7053 7051 if (un->un_f_monitor_media_state) {
7054 7052 mutex_enter(SD_MUTEX(un));
7055 7053 if (un->un_f_watcht_stopped == TRUE) {
7056 7054 opaque_t temp_token;
7057 7055
7058 7056 un->un_f_watcht_stopped = FALSE;
7059 7057 mutex_exit(SD_MUTEX(un));
7060 7058 temp_token =
7061 7059 sd_watch_request_submit(un);
7062 7060 mutex_enter(SD_MUTEX(un));
7063 7061 un->un_swr_token = temp_token;
7064 7062 }
7065 7063 mutex_exit(SD_MUTEX(un));
7066 7064 }
7067 7065 }
7068 7066 }
7069 7067
7070 7068 if (got_semaphore_here != 0) {
7071 7069 sema_v(&un->un_semoclose);
7072 7070 }
7073 7071 /*
7074 7072 * On exit put the state back to it's original value
7075 7073 * and broadcast to anyone waiting for the power
7076 7074 * change completion.
7077 7075 */
7078 7076 mutex_enter(SD_MUTEX(un));
7079 7077 un->un_state = state_before_pm;
7080 7078 cv_broadcast(&un->un_suspend_cv);
7081 7079 mutex_exit(SD_MUTEX(un));
7082 7080
7083 7081 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);
7084 7082
7085 7083 sd_ssc_fini(ssc);
7086 7084 return (rval);
7087 7085
7088 7086 sdpower_failed:
7089 7087
7090 7088 sd_ssc_fini(ssc);
7091 7089 return (DDI_FAILURE);
7092 7090 }
7093 7091
7094 7092
7095 7093
7096 7094 /*
7097 7095 * Function: sdattach
7098 7096 *
7099 7097 * Description: Driver's attach(9e) entry point function.
7100 7098 *
7101 7099 * Arguments: devi - opaque device info handle
7102 7100 * cmd - attach type
7103 7101 *
7104 7102 * Return Code: DDI_SUCCESS
7105 7103 * DDI_FAILURE
7106 7104 *
7107 7105 * Context: Kernel thread context
7108 7106 */
7109 7107
7110 7108 static int
7111 7109 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
7112 7110 {
7113 7111 switch (cmd) {
7114 7112 case DDI_ATTACH:
7115 7113 return (sd_unit_attach(devi));
7116 7114 case DDI_RESUME:
7117 7115 return (sd_ddi_resume(devi));
7118 7116 default:
7119 7117 break;
7120 7118 }
7121 7119 return (DDI_FAILURE);
7122 7120 }
7123 7121
7124 7122
7125 7123 /*
7126 7124 * Function: sddetach
7127 7125 *
7128 7126 * Description: Driver's detach(9E) entry point function.
7129 7127 *
7130 7128 * Arguments: devi - opaque device info handle
7131 7129 * cmd - detach type
7132 7130 *
7133 7131 * Return Code: DDI_SUCCESS
7134 7132 * DDI_FAILURE
7135 7133 *
7136 7134 * Context: Kernel thread context
7137 7135 */
7138 7136
7139 7137 static int
7140 7138 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
7141 7139 {
7142 7140 switch (cmd) {
7143 7141 case DDI_DETACH:
7144 7142 return (sd_unit_detach(devi));
7145 7143 case DDI_SUSPEND:
7146 7144 return (sd_ddi_suspend(devi));
7147 7145 default:
7148 7146 break;
7149 7147 }
7150 7148 return (DDI_FAILURE);
7151 7149 }
7152 7150
7153 7151
7154 7152 /*
7155 7153 * Function: sd_sync_with_callback
7156 7154 *
7157 7155 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
7158 7156 * state while the callback routine is active.
7159 7157 *
7160 7158 * Arguments: un: softstate structure for the instance
7161 7159 *
7162 7160 * Context: Kernel thread context
7163 7161 */
7164 7162
7165 7163 static void
7166 7164 sd_sync_with_callback(struct sd_lun *un)
7167 7165 {
7168 7166 ASSERT(un != NULL);
7169 7167
7170 7168 mutex_enter(SD_MUTEX(un));
7171 7169
7172 7170 ASSERT(un->un_in_callback >= 0);
7173 7171
7174 7172 while (un->un_in_callback > 0) {
7175 7173 mutex_exit(SD_MUTEX(un));
7176 7174 delay(2);
7177 7175 mutex_enter(SD_MUTEX(un));
7178 7176 }
7179 7177
7180 7178 mutex_exit(SD_MUTEX(un));
7181 7179 }
7182 7180
7183 7181 /*
7184 7182 * Function: sd_unit_attach
7185 7183 *
7186 7184 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
7187 7185 * the soft state structure for the device and performs
7188 7186 * all necessary structure and device initializations.
7189 7187 *
7190 7188 * Arguments: devi: the system's dev_info_t for the device.
7191 7189 *
7192 7190 * Return Code: DDI_SUCCESS if attach is successful.
7193 7191 * DDI_FAILURE if any part of the attach fails.
7194 7192 *
7195 7193 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
7196 7194 * Kernel thread context only. Can sleep.
7197 7195 */
7198 7196
7199 7197 static int
7200 7198 sd_unit_attach(dev_info_t *devi)
7201 7199 {
7202 7200 struct scsi_device *devp;
7203 7201 struct sd_lun *un;
7204 7202 char *variantp;
7205 7203 char name_str[48];
7206 7204 int reservation_flag = SD_TARGET_IS_UNRESERVED;
7207 7205 int instance;
7208 7206 int rval;
7209 7207 int wc_enabled;
7210 7208 int tgt;
7211 7209 uint64_t capacity;
7212 7210 uint_t lbasize = 0;
7213 7211 dev_info_t *pdip = ddi_get_parent(devi);
7214 7212 int offbyone = 0;
7215 7213 int geom_label_valid = 0;
7216 7214 sd_ssc_t *ssc;
7217 7215 int status;
7218 7216 struct sd_fm_internal *sfip = NULL;
7219 7217 int max_xfer_size;
7220 7218
7221 7219 /*
7222 7220 * Retrieve the target driver's private data area. This was set
7223 7221 * up by the HBA.
7224 7222 */
7225 7223 devp = ddi_get_driver_private(devi);
7226 7224
7227 7225 /*
7228 7226 * Retrieve the target ID of the device.
7229 7227 */
7230 7228 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7231 7229 SCSI_ADDR_PROP_TARGET, -1);
7232 7230
7233 7231 /*
7234 7232 * Since we have no idea what state things were left in by the last
7235 7233 * user of the device, set up some 'default' settings, ie. turn 'em
7236 7234 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
7237 7235 * Do this before the scsi_probe, which sends an inquiry.
7238 7236 * This is a fix for bug (4430280).
7239 7237 * Of special importance is wide-xfer. The drive could have been left
7240 7238 * in wide transfer mode by the last driver to communicate with it,
7241 7239 * this includes us. If that's the case, and if the following is not
7242 7240 * setup properly or we don't re-negotiate with the drive prior to
7243 7241 * transferring data to/from the drive, it causes bus parity errors,
7244 7242 * data overruns, and unexpected interrupts. This first occurred when
7245 7243 * the fix for bug (4378686) was made.
7246 7244 */
7247 7245 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
7248 7246 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
7249 7247 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);
7250 7248
7251 7249 /*
7252 7250 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs
7253 7251 * on a target. Setting it per lun instance actually sets the
7254 7252 * capability of this target, which affects those luns already
7255 7253 * attached on the same target. So during attach, we can only disable
7256 7254 * this capability only when no other lun has been attached on this
7257 7255 * target. By doing this, we assume a target has the same tagged-qing
7258 7256 * capability for every lun. The condition can be removed when HBA
7259 7257 * is changed to support per lun based tagged-qing capability.
7260 7258 */
7261 7259 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
7262 7260 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
7263 7261 }
7264 7262
7265 7263 /*
7266 7264 * Use scsi_probe() to issue an INQUIRY command to the device.
7267 7265 * This call will allocate and fill in the scsi_inquiry structure
7268 7266 * and point the sd_inq member of the scsi_device structure to it.
7269 7267 * If the attach succeeds, then this memory will not be de-allocated
7270 7268 * (via scsi_unprobe()) until the instance is detached.
7271 7269 */
7272 7270 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
7273 7271 goto probe_failed;
7274 7272 }
7275 7273
7276 7274 /*
7277 7275 * Check the device type as specified in the inquiry data and
7278 7276 * claim it if it is of a type that we support.
7279 7277 */
7280 7278 switch (devp->sd_inq->inq_dtype) {
7281 7279 case DTYPE_DIRECT:
7282 7280 break;
7283 7281 case DTYPE_RODIRECT:
7284 7282 break;
7285 7283 case DTYPE_OPTICAL:
7286 7284 break;
7287 7285 case DTYPE_NOTPRESENT:
7288 7286 default:
7289 7287 /* Unsupported device type; fail the attach. */
7290 7288 goto probe_failed;
7291 7289 }
7292 7290
7293 7291 /*
7294 7292 * Allocate the soft state structure for this unit.
7295 7293 *
7296 7294 * We rely upon this memory being set to all zeroes by
7297 7295 * ddi_soft_state_zalloc(). We assume that any member of the
7298 7296 * soft state structure that is not explicitly initialized by
7299 7297 * this routine will have a value of zero.
7300 7298 */
7301 7299 instance = ddi_get_instance(devp->sd_dev);
7302 7300 #ifndef XPV_HVM_DRIVER
7303 7301 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
7304 7302 goto probe_failed;
7305 7303 }
7306 7304 #endif /* !XPV_HVM_DRIVER */
7307 7305
7308 7306 /*
7309 7307 * Retrieve a pointer to the newly-allocated soft state.
7310 7308 *
7311 7309 * This should NEVER fail if the ddi_soft_state_zalloc() call above
7312 7310 * was successful, unless something has gone horribly wrong and the
7313 7311 * ddi's soft state internals are corrupt (in which case it is
7314 7312 * probably better to halt here than just fail the attach....)
7315 7313 */
7316 7314 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
7317 7315 panic("sd_unit_attach: NULL soft state on instance:0x%x",
7318 7316 instance);
7319 7317 /*NOTREACHED*/
7320 7318 }
7321 7319
7322 7320 /*
7323 7321 * Link the back ptr of the driver soft state to the scsi_device
7324 7322 * struct for this lun.
7325 7323 * Save a pointer to the softstate in the driver-private area of
7326 7324 * the scsi_device struct.
7327 7325 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
7328 7326 * we first set un->un_sd below.
7329 7327 */
7330 7328 un->un_sd = devp;
7331 7329 devp->sd_private = (opaque_t)un;
7332 7330
7333 7331 /*
7334 7332 * The following must be after devp is stored in the soft state struct.
7335 7333 */
7336 7334 #ifdef SDDEBUG
7337 7335 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7338 7336 "%s_unit_attach: un:0x%p instance:%d\n",
7339 7337 ddi_driver_name(devi), un, instance);
7340 7338 #endif
7341 7339
7342 7340 /*
7343 7341 * Set up the device type and node type (for the minor nodes).
7344 7342 * By default we assume that the device can at least support the
7345 7343 * Common Command Set. Call it a CD-ROM if it reports itself
7346 7344 * as a RODIRECT device.
7347 7345 */
7348 7346 switch (devp->sd_inq->inq_dtype) {
7349 7347 case DTYPE_RODIRECT:
7350 7348 un->un_node_type = DDI_NT_CD_CHAN;
7351 7349 un->un_ctype = CTYPE_CDROM;
7352 7350 break;
7353 7351 case DTYPE_OPTICAL:
7354 7352 un->un_node_type = DDI_NT_BLOCK_CHAN;
7355 7353 un->un_ctype = CTYPE_ROD;
7356 7354 break;
7357 7355 default:
7358 7356 un->un_node_type = DDI_NT_BLOCK_CHAN;
7359 7357 un->un_ctype = CTYPE_CCS;
7360 7358 break;
7361 7359 }
7362 7360
7363 7361 /*
7364 7362 * Try to read the interconnect type from the HBA.
7365 7363 *
7366 7364 * Note: This driver is currently compiled as two binaries, a parallel
7367 7365 * scsi version (sd) and a fibre channel version (ssd). All functional
7368 7366 * differences are determined at compile time. In the future a single
7369 7367 * binary will be provided and the interconnect type will be used to
7370 7368 * differentiate between fibre and parallel scsi behaviors. At that time
7371 7369 * it will be necessary for all fibre channel HBAs to support this
7372 7370 * property.
7373 7371 *
7374 7372 * set un_f_is_fiber to TRUE ( default fiber )
7375 7373 */
7376 7374 un->un_f_is_fibre = TRUE;
7377 7375 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
7378 7376 case INTERCONNECT_SSA:
7379 7377 un->un_interconnect_type = SD_INTERCONNECT_SSA;
7380 7378 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7381 7379 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
7382 7380 break;
7383 7381 case INTERCONNECT_PARALLEL:
7384 7382 un->un_f_is_fibre = FALSE;
7385 7383 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7386 7384 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7387 7385 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
7388 7386 break;
7389 7387 case INTERCONNECT_SAS:
7390 7388 un->un_f_is_fibre = FALSE;
7391 7389 un->un_interconnect_type = SD_INTERCONNECT_SAS;
7392 7390 un->un_node_type = DDI_NT_BLOCK_SAS;
7393 7391 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7394 7392 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
7395 7393 break;
7396 7394 case INTERCONNECT_SATA:
7397 7395 un->un_f_is_fibre = FALSE;
7398 7396 un->un_interconnect_type = SD_INTERCONNECT_SATA;
7399 7397 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7400 7398 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
7401 7399 break;
7402 7400 case INTERCONNECT_FIBRE:
7403 7401 un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
7404 7402 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7405 7403 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
7406 7404 break;
7407 7405 case INTERCONNECT_FABRIC:
7408 7406 un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
7409 7407 un->un_node_type = DDI_NT_BLOCK_FABRIC;
7410 7408 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7411 7409 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
7412 7410 break;
7413 7411 default:
7414 7412 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
7415 7413 /*
7416 7414 * The HBA does not support the "interconnect-type" property
7417 7415 * (or did not provide a recognized type).
7418 7416 *
7419 7417 * Note: This will be obsoleted when a single fibre channel
7420 7418 * and parallel scsi driver is delivered. In the meantime the
7421 7419 * interconnect type will be set to the platform default.If that
7422 7420 * type is not parallel SCSI, it means that we should be
7423 7421 * assuming "ssd" semantics. However, here this also means that
7424 7422 * the FC HBA is not supporting the "interconnect-type" property
7425 7423 * like we expect it to, so log this occurrence.
7426 7424 */
7427 7425 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
7428 7426 if (!SD_IS_PARALLEL_SCSI(un)) {
7429 7427 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7430 7428 "sd_unit_attach: un:0x%p Assuming "
7431 7429 "INTERCONNECT_FIBRE\n", un);
7432 7430 } else {
7433 7431 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7434 7432 "sd_unit_attach: un:0x%p Assuming "
7435 7433 "INTERCONNECT_PARALLEL\n", un);
7436 7434 un->un_f_is_fibre = FALSE;
7437 7435 }
7438 7436 #else
7439 7437 /*
7440 7438 * Note: This source will be implemented when a single fibre
7441 7439 * channel and parallel scsi driver is delivered. The default
7442 7440 * will be to assume that if a device does not support the
7443 7441 * "interconnect-type" property it is a parallel SCSI HBA and
7444 7442 * we will set the interconnect type for parallel scsi.
7445 7443 */
7446 7444 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7447 7445 un->un_f_is_fibre = FALSE;
7448 7446 #endif
7449 7447 break;
7450 7448 }
7451 7449
7452 7450 if (un->un_f_is_fibre == TRUE) {
7453 7451 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
7454 7452 SCSI_VERSION_3) {
7455 7453 switch (un->un_interconnect_type) {
7456 7454 case SD_INTERCONNECT_FIBRE:
7457 7455 case SD_INTERCONNECT_SSA:
7458 7456 un->un_node_type = DDI_NT_BLOCK_WWN;
7459 7457 break;
7460 7458 default:
7461 7459 break;
7462 7460 }
7463 7461 }
7464 7462 }
7465 7463
7466 7464 /*
7467 7465 * Initialize the Request Sense command for the target
7468 7466 */
7469 7467 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
7470 7468 goto alloc_rqs_failed;
7471 7469 }
7472 7470
7473 7471 /*
7474 7472 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc
7475 7473 * with separate binary for sd and ssd.
7476 7474 *
7477 7475 * x86 has 1 binary, un_retry_count is set base on connection type.
7478 7476 * The hardcoded values will go away when Sparc uses 1 binary
7479 7477 * for sd and ssd. This hardcoded values need to match
7480 7478 * SD_RETRY_COUNT in sddef.h
7481 7479 * The value used is base on interconnect type.
7482 7480 * fibre = 3, parallel = 5
7483 7481 */
7484 7482 #if defined(__i386) || defined(__amd64)
7485 7483 un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
7486 7484 #else
7487 7485 un->un_retry_count = SD_RETRY_COUNT;
7488 7486 #endif
7489 7487
7490 7488 /*
7491 7489 * Set the per disk retry count to the default number of retries
7492 7490 * for disks and CDROMs. This value can be overridden by the
7493 7491 * disk property list or an entry in sd.conf.
7494 7492 */
7495 7493 un->un_notready_retry_count =
7496 7494 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
7497 7495 : DISK_NOT_READY_RETRY_COUNT(un);
7498 7496
7499 7497 /*
7500 7498 * Set the busy retry count to the default value of un_retry_count.
7501 7499 * This can be overridden by entries in sd.conf or the device
7502 7500 * config table.
7503 7501 */
7504 7502 un->un_busy_retry_count = un->un_retry_count;
7505 7503
7506 7504 /*
7507 7505 * Init the reset threshold for retries. This number determines
7508 7506 * how many retries must be performed before a reset can be issued
7509 7507 * (for certain error conditions). This can be overridden by entries
7510 7508 * in sd.conf or the device config table.
7511 7509 */
7512 7510 un->un_reset_retry_count = (un->un_retry_count / 2);
7513 7511
7514 7512 /*
7515 7513 * Set the victim_retry_count to the default un_retry_count
7516 7514 */
7517 7515 un->un_victim_retry_count = (2 * un->un_retry_count);
7518 7516
7519 7517 /*
7520 7518 * Set the reservation release timeout to the default value of
7521 7519 * 5 seconds. This can be overridden by entries in ssd.conf or the
7522 7520 * device config table.
7523 7521 */
7524 7522 un->un_reserve_release_time = 5;
7525 7523
7526 7524 /*
7527 7525 * Set up the default maximum transfer size. Note that this may
7528 7526 * get updated later in the attach, when setting up default wide
7529 7527 * operations for disks.
7530 7528 */
7531 7529 #if defined(__i386) || defined(__amd64)
7532 7530 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
7533 7531 un->un_partial_dma_supported = 1;
7534 7532 #else
7535 7533 un->un_max_xfer_size = (uint_t)maxphys;
7536 7534 #endif
7537 7535
7538 7536 /*
7539 7537 * Get "allow bus device reset" property (defaults to "enabled" if
7540 7538 * the property was not defined). This is to disable bus resets for
7541 7539 * certain kinds of error recovery. Note: In the future when a run-time
7542 7540 * fibre check is available the soft state flag should default to
7543 7541 * enabled.
7544 7542 */
7545 7543 if (un->un_f_is_fibre == TRUE) {
7546 7544 un->un_f_allow_bus_device_reset = TRUE;
7547 7545 } else {
7548 7546 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7549 7547 "allow-bus-device-reset", 1) != 0) {
7550 7548 un->un_f_allow_bus_device_reset = TRUE;
7551 7549 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7552 7550 "sd_unit_attach: un:0x%p Bus device reset "
7553 7551 "enabled\n", un);
7554 7552 } else {
7555 7553 un->un_f_allow_bus_device_reset = FALSE;
7556 7554 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7557 7555 "sd_unit_attach: un:0x%p Bus device reset "
7558 7556 "disabled\n", un);
7559 7557 }
7560 7558 }
7561 7559
7562 7560 /*
7563 7561 * Check if this is an ATAPI device. ATAPI devices use Group 1
7564 7562 * Read/Write commands and Group 2 Mode Sense/Select commands.
7565 7563 *
7566 7564 * Note: The "obsolete" way of doing this is to check for the "atapi"
7567 7565 * property. The new "variant" property with a value of "atapi" has been
7568 7566 * introduced so that future 'variants' of standard SCSI behavior (like
7569 7567 * atapi) could be specified by the underlying HBA drivers by supplying
7570 7568 * a new value for the "variant" property, instead of having to define a
7571 7569 * new property.
7572 7570 */
7573 7571 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
7574 7572 un->un_f_cfg_is_atapi = TRUE;
7575 7573 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7576 7574 "sd_unit_attach: un:0x%p Atapi device\n", un);
7577 7575 }
7578 7576 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
7579 7577 &variantp) == DDI_PROP_SUCCESS) {
7580 7578 if (strcmp(variantp, "atapi") == 0) {
7581 7579 un->un_f_cfg_is_atapi = TRUE;
7582 7580 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7583 7581 "sd_unit_attach: un:0x%p Atapi device\n", un);
7584 7582 }
7585 7583 ddi_prop_free(variantp);
7586 7584 }
7587 7585
7588 7586 un->un_cmd_timeout = SD_IO_TIME;
7589 7587
7590 7588 un->un_busy_timeout = SD_BSY_TIMEOUT;
7591 7589
7592 7590 /* Info on current states, statuses, etc. (Updated frequently) */
7593 7591 un->un_state = SD_STATE_NORMAL;
7594 7592 un->un_last_state = SD_STATE_NORMAL;
7595 7593
7596 7594 /* Control & status info for command throttling */
7597 7595 un->un_throttle = sd_max_throttle;
7598 7596 un->un_saved_throttle = sd_max_throttle;
7599 7597 un->un_min_throttle = sd_min_throttle;
7600 7598
7601 7599 if (un->un_f_is_fibre == TRUE) {
7602 7600 un->un_f_use_adaptive_throttle = TRUE;
7603 7601 } else {
7604 7602 un->un_f_use_adaptive_throttle = FALSE;
7605 7603 }
7606 7604
7607 7605 /* Removable media support. */
7608 7606 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
7609 7607 un->un_mediastate = DKIO_NONE;
7610 7608 un->un_specified_mediastate = DKIO_NONE;
7611 7609
7612 7610 /* CVs for suspend/resume (PM or DR) */
7613 7611 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
7614 7612 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
7615 7613
7616 7614 /* Power management support. */
7617 7615 un->un_power_level = SD_SPINDLE_UNINIT;
7618 7616
7619 7617 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
7620 7618 un->un_f_wcc_inprog = 0;
7621 7619
7622 7620 /*
7623 7621 * The open/close semaphore is used to serialize threads executing
7624 7622 * in the driver's open & close entry point routines for a given
7625 7623 * instance.
7626 7624 */
7627 7625 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
7628 7626
7629 7627 /*
7630 7628 * The conf file entry and softstate variable is a forceful override,
7631 7629 * meaning a non-zero value must be entered to change the default.
7632 7630 */
7633 7631 un->un_f_disksort_disabled = FALSE;
7634 7632 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT;
7635 7633 un->un_f_enable_rmw = FALSE;
7636 7634
7637 7635 /*
7638 7636 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but
7639 7637 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property.
7640 7638 */
7641 7639 un->un_f_mmc_gesn_polling = TRUE;
7642 7640
7643 7641 /*
7644 7642 * physical sector size defaults to DEV_BSIZE currently. We can
7645 7643 * override this value via the driver configuration file so we must
7646 7644 * set it before calling sd_read_unit_properties().
7647 7645 */
7648 7646 un->un_phy_blocksize = DEV_BSIZE;
7649 7647
7650 7648 /*
7651 7649 * Retrieve the properties from the static driver table or the driver
7652 7650 * configuration file (.conf) for this unit and update the soft state
7653 7651 * for the device as needed for the indicated properties.
7654 7652 * Note: the property configuration needs to occur here as some of the
7655 7653 * following routines may have dependencies on soft state flags set
7656 7654 * as part of the driver property configuration.
7657 7655 */
7658 7656 sd_read_unit_properties(un);
7659 7657 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7660 7658 "sd_unit_attach: un:0x%p property configuration complete.\n", un);
7661 7659
7662 7660 /*
7663 7661 * Only if a device has "hotpluggable" property, it is
7664 7662 * treated as hotpluggable device. Otherwise, it is
7665 7663 * regarded as non-hotpluggable one.
7666 7664 */
7667 7665 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
7668 7666 -1) != -1) {
7669 7667 un->un_f_is_hotpluggable = TRUE;
7670 7668 }
7671 7669
7672 7670 /*
7673 7671 * set unit's attributes(flags) according to "hotpluggable" and
7674 7672 * RMB bit in INQUIRY data.
7675 7673 */
7676 7674 sd_set_unit_attributes(un, devi);
7677 7675
7678 7676 /*
7679 7677 * By default, we mark the capacity, lbasize, and geometry
7680 7678 * as invalid. Only if we successfully read a valid capacity
7681 7679 * will we update the un_blockcount and un_tgt_blocksize with the
7682 7680 * valid values (the geometry will be validated later).
7683 7681 */
7684 7682 un->un_f_blockcount_is_valid = FALSE;
7685 7683 un->un_f_tgt_blocksize_is_valid = FALSE;
7686 7684
7687 7685 /*
7688 7686 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
7689 7687 * otherwise.
7690 7688 */
7691 7689 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
7692 7690 un->un_blockcount = 0;
7693 7691
7694 7692 /*
7695 7693 * Set up the per-instance info needed to determine the correct
7696 7694 * CDBs and other info for issuing commands to the target.
7697 7695 */
7698 7696 sd_init_cdb_limits(un);
7699 7697
7700 7698 /*
7701 7699 * Set up the IO chains to use, based upon the target type.
7702 7700 */
7703 7701 if (un->un_f_non_devbsize_supported) {
7704 7702 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
7705 7703 } else {
7706 7704 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
7707 7705 }
7708 7706 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
7709 7707 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
7710 7708 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
7711 7709
7712 7710 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
7713 7711 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
7714 7712 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
7715 7713 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
7716 7714
7717 7715
7718 7716 if (ISCD(un)) {
7719 7717 un->un_additional_codes = sd_additional_codes;
7720 7718 } else {
7721 7719 un->un_additional_codes = NULL;
7722 7720 }
7723 7721
7724 7722 /*
7725 7723 * Create the kstats here so they can be available for attach-time
7726 7724 * routines that send commands to the unit (either polled or via
7727 7725 * sd_send_scsi_cmd).
7728 7726 *
7729 7727 * Note: This is a critical sequence that needs to be maintained:
7730 7728 * 1) Instantiate the kstats here, before any routines using the
7731 7729 * iopath (i.e. sd_send_scsi_cmd).
7732 7730 * 2) Instantiate and initialize the partition stats
7733 7731 * (sd_set_pstats).
7734 7732 * 3) Initialize the error stats (sd_set_errstats), following
7735 7733 * sd_validate_geometry(),sd_register_devid(),
7736 7734 * and sd_cache_control().
7737 7735 */
7738 7736
7739 7737 un->un_stats = kstat_create(sd_label, instance,
7740 7738 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
7741 7739 if (un->un_stats != NULL) {
7742 7740 un->un_stats->ks_lock = SD_MUTEX(un);
7743 7741 kstat_install(un->un_stats);
7744 7742 }
7745 7743 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7746 7744 "sd_unit_attach: un:0x%p un_stats created\n", un);
7747 7745
7748 7746 sd_create_errstats(un, instance);
7749 7747 if (un->un_errstats == NULL) {
7750 7748 goto create_errstats_failed;
7751 7749 }
7752 7750 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7753 7751 "sd_unit_attach: un:0x%p errstats created\n", un);
7754 7752
7755 7753 /*
7756 7754 * The following if/else code was relocated here from below as part
7757 7755 * of the fix for bug (4430280). However with the default setup added
7758 7756 * on entry to this routine, it's no longer absolutely necessary for
7759 7757 * this to be before the call to sd_spin_up_unit.
7760 7758 */
7761 7759 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
7762 7760 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
7763 7761 (devp->sd_inq->inq_ansi == 5)) &&
7764 7762 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;
7765 7763
7766 7764 /*
7767 7765 * If tagged queueing is supported by the target
7768 7766 * and by the host adapter then we will enable it
7769 7767 */
7770 7768 un->un_tagflags = 0;
7771 7769 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
7772 7770 (un->un_f_arq_enabled == TRUE)) {
7773 7771 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
7774 7772 1, 1) == 1) {
7775 7773 un->un_tagflags = FLAG_STAG;
7776 7774 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7777 7775 "sd_unit_attach: un:0x%p tag queueing "
7778 7776 "enabled\n", un);
7779 7777 } else if (scsi_ifgetcap(SD_ADDRESS(un),
7780 7778 "untagged-qing", 0) == 1) {
7781 7779 un->un_f_opt_queueing = TRUE;
7782 7780 un->un_saved_throttle = un->un_throttle =
7783 7781 min(un->un_throttle, 3);
7784 7782 } else {
7785 7783 un->un_f_opt_queueing = FALSE;
7786 7784 un->un_saved_throttle = un->un_throttle = 1;
7787 7785 }
7788 7786 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
7789 7787 == 1) && (un->un_f_arq_enabled == TRUE)) {
7790 7788 /* The Host Adapter supports internal queueing. */
7791 7789 un->un_f_opt_queueing = TRUE;
7792 7790 un->un_saved_throttle = un->un_throttle =
7793 7791 min(un->un_throttle, 3);
7794 7792 } else {
7795 7793 un->un_f_opt_queueing = FALSE;
7796 7794 un->un_saved_throttle = un->un_throttle = 1;
7797 7795 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7798 7796 "sd_unit_attach: un:0x%p no tag queueing\n", un);
7799 7797 }
7800 7798
7801 7799 /*
7802 7800 * Enable large transfers for SATA/SAS drives
7803 7801 */
7804 7802 if (SD_IS_SERIAL(un)) {
7805 7803 un->un_max_xfer_size =
7806 7804 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7807 7805 sd_max_xfer_size, SD_MAX_XFER_SIZE);
7808 7806 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7809 7807 "sd_unit_attach: un:0x%p max transfer "
7810 7808 "size=0x%x\n", un, un->un_max_xfer_size);
7811 7809
7812 7810 }
7813 7811
7814 7812 /* Setup or tear down default wide operations for disks */
7815 7813
7816 7814 /*
7817 7815 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
7818 7816 * and "ssd_max_xfer_size" to exist simultaneously on the same
7819 7817 * system and be set to different values. In the future this
7820 7818 * code may need to be updated when the ssd module is
7821 7819 * obsoleted and removed from the system. (4299588)
7822 7820 */
7823 7821 if (SD_IS_PARALLEL_SCSI(un) &&
7824 7822 (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
7825 7823 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
7826 7824 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7827 7825 1, 1) == 1) {
7828 7826 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7829 7827 "sd_unit_attach: un:0x%p Wide Transfer "
7830 7828 "enabled\n", un);
7831 7829 }
7832 7830
7833 7831 /*
7834 7832 * If tagged queuing has also been enabled, then
7835 7833 * enable large xfers
7836 7834 */
7837 7835 if (un->un_saved_throttle == sd_max_throttle) {
7838 7836 un->un_max_xfer_size =
7839 7837 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7840 7838 sd_max_xfer_size, SD_MAX_XFER_SIZE);
7841 7839 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7842 7840 "sd_unit_attach: un:0x%p max transfer "
7843 7841 "size=0x%x\n", un, un->un_max_xfer_size);
7844 7842 }
7845 7843 } else {
7846 7844 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7847 7845 0, 1) == 1) {
7848 7846 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7849 7847 "sd_unit_attach: un:0x%p "
7850 7848 "Wide Transfer disabled\n", un);
7851 7849 }
7852 7850 }
7853 7851 } else {
7854 7852 un->un_tagflags = FLAG_STAG;
7855 7853 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
7856 7854 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
7857 7855 }
7858 7856
7859 7857 /*
7860 7858 * If this target supports LUN reset, try to enable it.
7861 7859 */
7862 7860 if (un->un_f_lun_reset_enabled) {
7863 7861 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
7864 7862 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7865 7863 "un:0x%p lun_reset capability set\n", un);
7866 7864 } else {
7867 7865 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7868 7866 "un:0x%p lun-reset capability not set\n", un);
7869 7867 }
7870 7868 }
7871 7869
7872 7870 /*
7873 7871 * Adjust the maximum transfer size. This is to fix
7874 7872 * the problem of partial DMA support on SPARC. Some
7875 7873 * HBA driver, like aac, has very small dma_attr_maxxfer
7876 7874 * size, which requires partial DMA support on SPARC.
7877 7875 * In the future the SPARC pci nexus driver may solve
7878 7876 * the problem instead of this fix.
7879 7877 */
7880 7878 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7881 7879 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7882 7880 /* We need DMA partial even on sparc to ensure sddump() works */
7883 7881 un->un_max_xfer_size = max_xfer_size;
7884 7882 if (un->un_partial_dma_supported == 0)
7885 7883 un->un_partial_dma_supported = 1;
7886 7884 }
7887 7885 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7888 7886 DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
7889 7887 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
7890 7888 un->un_max_xfer_size) == 1) {
7891 7889 un->un_buf_breakup_supported = 1;
7892 7890 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7893 7891 "un:0x%p Buf breakup enabled\n", un);
7894 7892 }
7895 7893 }
7896 7894
7897 7895 /*
7898 7896 * Set PKT_DMA_PARTIAL flag.
7899 7897 */
7900 7898 if (un->un_partial_dma_supported == 1) {
7901 7899 un->un_pkt_flags = PKT_DMA_PARTIAL;
7902 7900 } else {
7903 7901 un->un_pkt_flags = 0;
7904 7902 }
7905 7903
7906 7904 /* Initialize sd_ssc_t for internal uscsi commands */
7907 7905 ssc = sd_ssc_init(un);
7908 7906 scsi_fm_init(devp);
7909 7907
7910 7908 /*
7911 7909 * Allocate memory for SCSI FMA stuffs.
7912 7910 */
7913 7911 un->un_fm_private =
7914 7912 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
7915 7913 sfip = (struct sd_fm_internal *)un->un_fm_private;
7916 7914 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
7917 7915 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
7918 7916 sfip->fm_ssc.ssc_un = un;
7919 7917
7920 7918 if (ISCD(un) ||
7921 7919 un->un_f_has_removable_media ||
7922 7920 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
7923 7921 /*
7924 7922 * We don't touch CDROM or the DDI_FM_NOT_CAPABLE device.
7925 7923 * Their log are unchanged.
7926 7924 */
7927 7925 sfip->fm_log_level = SD_FM_LOG_NSUP;
7928 7926 } else {
7929 7927 /*
7930 7928 * If enter here, it should be non-CDROM and FM-capable
7931 7929 * device, and it will not keep the old scsi_log as before
7932 7930 * in /var/adm/messages. However, the property
7933 7931 * "fm-scsi-log" will control whether the FM telemetry will
7934 7932 * be logged in /var/adm/messages.
7935 7933 */
7936 7934 int fm_scsi_log;
7937 7935 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7938 7936 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);
7939 7937
7940 7938 if (fm_scsi_log)
7941 7939 sfip->fm_log_level = SD_FM_LOG_EREPORT;
7942 7940 else
7943 7941 sfip->fm_log_level = SD_FM_LOG_SILENT;
7944 7942 }
7945 7943
7946 7944 /*
7947 7945 * At this point in the attach, we have enough info in the
7948 7946 * soft state to be able to issue commands to the target.
7949 7947 *
7950 7948 * All command paths used below MUST issue their commands as
7951 7949 * SD_PATH_DIRECT. This is important as intermediate layers
7952 7950 * are not all initialized yet (such as PM).
7953 7951 */
7954 7952
7955 7953 /*
7956 7954 * Send a TEST UNIT READY command to the device. This should clear
7957 7955 * any outstanding UNIT ATTENTION that may be present.
7958 7956 *
7959 7957 * Note: Don't check for success, just track if there is a reservation,
7960 7958 * this is a throw away command to clear any unit attentions.
7961 7959 *
7962 7960 * Note: This MUST be the first command issued to the target during
7963 7961 * attach to ensure power on UNIT ATTENTIONS are cleared.
7964 7962 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
7965 7963 * with attempts at spinning up a device with no media.
7966 7964 */
7967 7965 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
7968 7966 if (status != 0) {
7969 7967 if (status == EACCES)
7970 7968 reservation_flag = SD_TARGET_IS_RESERVED;
7971 7969 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7972 7970 }
7973 7971
7974 7972 /*
7975 7973 * If the device is NOT a removable media device, attempt to spin
7976 7974 * it up (using the START_STOP_UNIT command) and read its capacity
7977 7975 * (using the READ CAPACITY command). Note, however, that either
7978 7976 * of these could fail and in some cases we would continue with
7979 7977 * the attach despite the failure (see below).
7980 7978 */
7981 7979 if (un->un_f_descr_format_supported) {
7982 7980
7983 7981 switch (sd_spin_up_unit(ssc)) {
7984 7982 case 0:
7985 7983 /*
7986 7984 * Spin-up was successful; now try to read the
7987 7985 * capacity. If successful then save the results
7988 7986 * and mark the capacity & lbasize as valid.
7989 7987 */
7990 7988 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7991 7989 "sd_unit_attach: un:0x%p spin-up successful\n", un);
7992 7990
7993 7991 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
7994 7992 &lbasize, SD_PATH_DIRECT);
7995 7993
7996 7994 switch (status) {
7997 7995 case 0: {
7998 7996 if (capacity > DK_MAX_BLOCKS) {
7999 7997 #ifdef _LP64
8000 7998 if ((capacity + 1) >
8001 7999 SD_GROUP1_MAX_ADDRESS) {
8002 8000 /*
8003 8001 * Enable descriptor format
8004 8002 * sense data so that we can
8005 8003 * get 64 bit sense data
8006 8004 * fields.
8007 8005 */
8008 8006 sd_enable_descr_sense(ssc);
8009 8007 }
8010 8008 #else
8011 8009 /* 32-bit kernels can't handle this */
8012 8010 scsi_log(SD_DEVINFO(un),
8013 8011 sd_label, CE_WARN,
8014 8012 "disk has %llu blocks, which "
8015 8013 "is too large for a 32-bit "
8016 8014 "kernel", capacity);
8017 8015
8018 8016 #if defined(__i386) || defined(__amd64)
8019 8017 /*
8020 8018 * 1TB disk was treated as (1T - 512)B
8021 8019 * in the past, so that it might have
8022 8020 * valid VTOC and solaris partitions,
8023 8021 * we have to allow it to continue to
8024 8022 * work.
8025 8023 */
8026 8024 if (capacity -1 > DK_MAX_BLOCKS)
8027 8025 #endif
8028 8026 goto spinup_failed;
8029 8027 #endif
8030 8028 }
8031 8029
8032 8030 /*
8033 8031 * Here it's not necessary to check the case:
8034 8032 * the capacity of the device is bigger than
8035 8033 * what the max hba cdb can support. Because
8036 8034 * sd_send_scsi_READ_CAPACITY will retrieve
8037 8035 * the capacity by sending USCSI command, which
8038 8036 * is constrained by the max hba cdb. Actually,
8039 8037 * sd_send_scsi_READ_CAPACITY will return
8040 8038 * EINVAL when using bigger cdb than required
8041 8039 * cdb length. Will handle this case in
8042 8040 * "case EINVAL".
8043 8041 */
8044 8042
8045 8043 /*
8046 8044 * The following relies on
8047 8045 * sd_send_scsi_READ_CAPACITY never
8048 8046 * returning 0 for capacity and/or lbasize.
8049 8047 */
8050 8048 sd_update_block_info(un, lbasize, capacity);
8051 8049
8052 8050 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8053 8051 "sd_unit_attach: un:0x%p capacity = %ld "
8054 8052 "blocks; lbasize= %ld.\n", un,
8055 8053 un->un_blockcount, un->un_tgt_blocksize);
8056 8054
8057 8055 break;
8058 8056 }
8059 8057 case EINVAL:
8060 8058 /*
8061 8059 * In the case where the max-cdb-length property
8062 8060 * is smaller than the required CDB length for
8063 8061 * a SCSI device, a target driver can fail to
8064 8062 * attach to that device.
8065 8063 */
8066 8064 scsi_log(SD_DEVINFO(un),
8067 8065 sd_label, CE_WARN,
8068 8066 "disk capacity is too large "
8069 8067 "for current cdb length");
8070 8068 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8071 8069
8072 8070 goto spinup_failed;
8073 8071 case EACCES:
8074 8072 /*
8075 8073 * Should never get here if the spin-up
8076 8074 * succeeded, but code it in anyway.
8077 8075 * From here, just continue with the attach...
8078 8076 */
8079 8077 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8080 8078 "sd_unit_attach: un:0x%p "
8081 8079 "sd_send_scsi_READ_CAPACITY "
8082 8080 "returned reservation conflict\n", un);
8083 8081 reservation_flag = SD_TARGET_IS_RESERVED;
8084 8082 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8085 8083 break;
8086 8084 default:
8087 8085 /*
8088 8086 * Likewise, should never get here if the
8089 8087 * spin-up succeeded. Just continue with
8090 8088 * the attach...
8091 8089 */
8092 8090 if (status == EIO)
8093 8091 sd_ssc_assessment(ssc,
8094 8092 SD_FMT_STATUS_CHECK);
8095 8093 else
8096 8094 sd_ssc_assessment(ssc,
8097 8095 SD_FMT_IGNORE);
8098 8096 break;
8099 8097 }
8100 8098 break;
8101 8099 case EACCES:
8102 8100 /*
8103 8101 * Device is reserved by another host. In this case
8104 8102 * we could not spin it up or read the capacity, but
8105 8103 * we continue with the attach anyway.
8106 8104 */
8107 8105 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8108 8106 "sd_unit_attach: un:0x%p spin-up reservation "
8109 8107 "conflict.\n", un);
8110 8108 reservation_flag = SD_TARGET_IS_RESERVED;
8111 8109 break;
8112 8110 default:
8113 8111 /* Fail the attach if the spin-up failed. */
8114 8112 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8115 8113 "sd_unit_attach: un:0x%p spin-up failed.", un);
8116 8114 goto spinup_failed;
8117 8115 }
8118 8116
8119 8117 }
8120 8118
8121 8119 /*
8122 8120 * Check to see if this is a MMC drive
8123 8121 */
8124 8122 if (ISCD(un)) {
8125 8123 sd_set_mmc_caps(ssc);
8126 8124 }
8127 8125
8128 8126 /*
8129 8127 * Add a zero-length attribute to tell the world we support
8130 8128 * kernel ioctls (for layered drivers)
8131 8129 */
8132 8130 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8133 8131 DDI_KERNEL_IOCTL, NULL, 0);
8134 8132
8135 8133 /*
8136 8134 * Add a boolean property to tell the world we support
8137 8135 * the B_FAILFAST flag (for layered drivers)
8138 8136 */
8139 8137 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8140 8138 "ddi-failfast-supported", NULL, 0);
8141 8139
8142 8140 /*
8143 8141 * Initialize power management
8144 8142 */
8145 8143 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
8146 8144 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
8147 8145 sd_setup_pm(ssc, devi);
8148 8146 if (un->un_f_pm_is_enabled == FALSE) {
8149 8147 /*
8150 8148 * For performance, point to a jump table that does
8151 8149 * not include pm.
8152 8150 * The direct and priority chains don't change with PM.
8153 8151 *
8154 8152 * Note: this is currently done based on individual device
8155 8153 * capabilities. When an interface for determining system
8156 8154 * power enabled state becomes available, or when additional
8157 8155 * layers are added to the command chain, these values will
8158 8156 * have to be re-evaluated for correctness.
8159 8157 */
8160 8158 if (un->un_f_non_devbsize_supported) {
8161 8159 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
8162 8160 } else {
8163 8161 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
8164 8162 }
8165 8163 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
8166 8164 }
8167 8165
8168 8166 /*
8169 8167 * This property is set to 0 by HA software to avoid retries
8170 8168 * on a reserved disk. (The preferred property name is
8171 8169 * "retry-on-reservation-conflict") (1189689)
8172 8170 *
8173 8171 * Note: The use of a global here can have unintended consequences. A
8174 8172 * per instance variable is preferable to match the capabilities of
8175 8173 * different underlying hba's (4402600)
8176 8174 */
8177 8175 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
8178 8176 DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
8179 8177 sd_retry_on_reservation_conflict);
8180 8178 if (sd_retry_on_reservation_conflict != 0) {
8181 8179 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
8182 8180 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
8183 8181 sd_retry_on_reservation_conflict);
8184 8182 }
8185 8183
8186 8184 /* Set up options for QFULL handling. */
8187 8185 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8188 8186 "qfull-retries", -1)) != -1) {
8189 8187 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
8190 8188 rval, 1);
8191 8189 }
8192 8190 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8193 8191 "qfull-retry-interval", -1)) != -1) {
8194 8192 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
8195 8193 rval, 1);
8196 8194 }
8197 8195
8198 8196 /*
8199 8197 * This just prints a message that announces the existence of the
8200 8198 * device. The message is always printed in the system logfile, but
8201 8199 * only appears on the console if the system is booted with the
8202 8200 * -v (verbose) argument.
8203 8201 */
8204 8202 ddi_report_dev(devi);
8205 8203
8206 8204 un->un_mediastate = DKIO_NONE;
8207 8205
8208 8206 /*
8209 8207 * Check if this is a SSD(Solid State Drive).
8210 8208 */
8211 8209 sd_check_solid_state(ssc);
8212 8210
8213 8211 /*
8214 8212 * Check whether the drive is in emulation mode.
8215 8213 */
8216 8214 sd_check_emulation_mode(ssc);
8217 8215
8218 8216 cmlb_alloc_handle(&un->un_cmlbhandle);
8219 8217
8220 8218 #if defined(__i386) || defined(__amd64)
8221 8219 /*
8222 8220 * On x86, compensate for off-by-1 legacy error
8223 8221 */
8224 8222 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
8225 8223 (lbasize == un->un_sys_blocksize))
8226 8224 offbyone = CMLB_OFF_BY_ONE;
8227 8225 #endif
8228 8226
8229 8227 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
8230 8228 VOID2BOOLEAN(un->un_f_has_removable_media != 0),
8231 8229 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
8232 8230 un->un_node_type, offbyone, un->un_cmlbhandle,
8233 8231 (void *)SD_PATH_DIRECT) != 0) {
8234 8232 goto cmlb_attach_failed;
8235 8233 }
8236 8234
8237 8235
8238 8236 /*
8239 8237 * Read and validate the device's geometry (ie, disk label)
8240 8238 * A new unformatted drive will not have a valid geometry, but
8241 8239 * the driver needs to successfully attach to this device so
8242 8240 * the drive can be formatted via ioctls.
8243 8241 */
8244 8242 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
8245 8243 (void *)SD_PATH_DIRECT) == 0) ? 1: 0;
8246 8244
8247 8245 mutex_enter(SD_MUTEX(un));
8248 8246
8249 8247 /*
8250 8248 * Read and initialize the devid for the unit.
8251 8249 */
8252 8250 if (un->un_f_devid_supported) {
8253 8251 sd_register_devid(ssc, devi, reservation_flag);
8254 8252 }
8255 8253 mutex_exit(SD_MUTEX(un));
8256 8254
8257 8255 #if (defined(__fibre))
8258 8256 /*
8259 8257 * Register callbacks for fibre only. You can't do this solely
8260 8258 * on the basis of the devid_type because this is hba specific.
8261 8259 * We need to query our hba capabilities to find out whether to
8262 8260 * register or not.
8263 8261 */
8264 8262 if (un->un_f_is_fibre) {
8265 8263 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
8266 8264 sd_init_event_callbacks(un);
8267 8265 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8268 8266 "sd_unit_attach: un:0x%p event callbacks inserted",
8269 8267 un);
8270 8268 }
8271 8269 }
8272 8270 #endif
8273 8271
8274 8272 if (un->un_f_opt_disable_cache == TRUE) {
8275 8273 /*
8276 8274 * Disable both read cache and write cache. This is
8277 8275 * the historic behavior of the keywords in the config file.
8278 8276 */
8279 8277 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
8280 8278 0) {
8281 8279 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8282 8280 "sd_unit_attach: un:0x%p Could not disable "
8283 8281 "caching", un);
8284 8282 goto devid_failed;
8285 8283 }
8286 8284 }
8287 8285
8288 8286 /*
8289 8287 * Check the value of the WCE bit now and
8290 8288 * set un_f_write_cache_enabled accordingly.
8291 8289 */
8292 8290 (void) sd_get_write_cache_enabled(ssc, &wc_enabled);
8293 8291 mutex_enter(SD_MUTEX(un));
8294 8292 un->un_f_write_cache_enabled = (wc_enabled != 0);
8295 8293 mutex_exit(SD_MUTEX(un));
8296 8294
8297 8295 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR &&
8298 8296 un->un_tgt_blocksize != DEV_BSIZE) ||
8299 8297 un->un_f_enable_rmw) {
8300 8298 if (!(un->un_wm_cache)) {
8301 8299 (void) snprintf(name_str, sizeof (name_str),
8302 8300 "%s%d_cache",
8303 8301 ddi_driver_name(SD_DEVINFO(un)),
8304 8302 ddi_get_instance(SD_DEVINFO(un)));
8305 8303 un->un_wm_cache = kmem_cache_create(
8306 8304 name_str, sizeof (struct sd_w_map),
8307 8305 8, sd_wm_cache_constructor,
8308 8306 sd_wm_cache_destructor, NULL,
8309 8307 (void *)un, NULL, 0);
8310 8308 if (!(un->un_wm_cache)) {
8311 8309 goto wm_cache_failed;
8312 8310 }
8313 8311 }
8314 8312 }
8315 8313
8316 8314 /*
8317 8315 * Check the value of the NV_SUP bit and set
8318 8316 * un_f_suppress_cache_flush accordingly.
8319 8317 */
8320 8318 sd_get_nv_sup(ssc);
8321 8319
8322 8320 /*
8323 8321 * Find out what type of reservation this disk supports.
8324 8322 */
8325 8323 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);
8326 8324
8327 8325 switch (status) {
8328 8326 case 0:
8329 8327 /*
8330 8328 * SCSI-3 reservations are supported.
8331 8329 */
8332 8330 un->un_reservation_type = SD_SCSI3_RESERVATION;
8333 8331 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8334 8332 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
8335 8333 break;
8336 8334 case ENOTSUP:
8337 8335 /*
8338 8336 * The PERSISTENT RESERVE IN command would not be recognized by
8339 8337 * a SCSI-2 device, so assume the reservation type is SCSI-2.
8340 8338 */
8341 8339 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8342 8340 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
8343 8341 un->un_reservation_type = SD_SCSI2_RESERVATION;
8344 8342
8345 8343 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8346 8344 break;
8347 8345 default:
8348 8346 /*
8349 8347 * default to SCSI-3 reservations
8350 8348 */
8351 8349 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8352 8350 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
8353 8351 un->un_reservation_type = SD_SCSI3_RESERVATION;
8354 8352
8355 8353 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8356 8354 break;
8357 8355 }
8358 8356
8359 8357 /*
8360 8358 * Set the pstat and error stat values here, so data obtained during the
8361 8359 * previous attach-time routines is available.
8362 8360 *
8363 8361 * Note: This is a critical sequence that needs to be maintained:
8364 8362 * 1) Instantiate the kstats before any routines using the iopath
8365 8363 * (i.e. sd_send_scsi_cmd).
8366 8364 * 2) Initialize the error stats (sd_set_errstats) and partition
8367 8365 * stats (sd_set_pstats)here, following
8368 8366 * cmlb_validate_geometry(), sd_register_devid(), and
8369 8367 * sd_cache_control().
8370 8368 */
8371 8369
8372 8370 if (un->un_f_pkstats_enabled && geom_label_valid) {
8373 8371 sd_set_pstats(un);
8374 8372 SD_TRACE(SD_LOG_IO_PARTITION, un,
8375 8373 "sd_unit_attach: un:0x%p pstats created and set\n", un);
8376 8374 }
8377 8375
8378 8376 sd_set_errstats(un);
8379 8377 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8380 8378 "sd_unit_attach: un:0x%p errstats set\n", un);
8381 8379
8382 8380
8383 8381 /*
8384 8382 * After successfully attaching an instance, we record the information
8385 8383 * of how many luns have been attached on the relative target and
8386 8384 * controller for parallel SCSI. This information is used when sd tries
8387 8385 * to set the tagged queuing capability in HBA.
8388 8386 */
8389 8387 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
8390 8388 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
8391 8389 }
8392 8390
8393 8391 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8394 8392 "sd_unit_attach: un:0x%p exit success\n", un);
8395 8393
8396 8394 /* Uninitialize sd_ssc_t pointer */
8397 8395 sd_ssc_fini(ssc);
8398 8396
8399 8397 return (DDI_SUCCESS);
8400 8398
8401 8399 /*
8402 8400 * An error occurred during the attach; clean up & return failure.
8403 8401 */
8404 8402 wm_cache_failed:
8405 8403 devid_failed:
8406 8404
8407 8405 setup_pm_failed:
8408 8406 ddi_remove_minor_node(devi, NULL);
8409 8407
8410 8408 cmlb_attach_failed:
8411 8409 /*
8412 8410 * Cleanup from the scsi_ifsetcap() calls (437868)
8413 8411 */
8414 8412 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8415 8413 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8416 8414
8417 8415 /*
8418 8416 * Refer to the comments of setting tagged-qing in the beginning of
8419 8417 * sd_unit_attach. We can only disable tagged queuing when there is
8420 8418 * no lun attached on the target.
8421 8419 */
8422 8420 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
8423 8421 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8424 8422 }
8425 8423
8426 8424 if (un->un_f_is_fibre == FALSE) {
8427 8425 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8428 8426 }
8429 8427
8430 8428 spinup_failed:
8431 8429
8432 8430 /* Uninitialize sd_ssc_t pointer */
8433 8431 sd_ssc_fini(ssc);
8434 8432
8435 8433 mutex_enter(SD_MUTEX(un));
8436 8434
8437 8435 /* Deallocate SCSI FMA memory spaces */
8438 8436 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8439 8437
8440 8438 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */
8441 8439 if (un->un_direct_priority_timeid != NULL) {
8442 8440 timeout_id_t temp_id = un->un_direct_priority_timeid;
8443 8441 un->un_direct_priority_timeid = NULL;
8444 8442 mutex_exit(SD_MUTEX(un));
8445 8443 (void) untimeout(temp_id);
8446 8444 mutex_enter(SD_MUTEX(un));
8447 8445 }
8448 8446
8449 8447 /* Cancel any pending start/stop timeouts */
8450 8448 if (un->un_startstop_timeid != NULL) {
8451 8449 timeout_id_t temp_id = un->un_startstop_timeid;
8452 8450 un->un_startstop_timeid = NULL;
8453 8451 mutex_exit(SD_MUTEX(un));
8454 8452 (void) untimeout(temp_id);
8455 8453 mutex_enter(SD_MUTEX(un));
8456 8454 }
8457 8455
8458 8456 /* Cancel any pending reset-throttle timeouts */
8459 8457 if (un->un_reset_throttle_timeid != NULL) {
8460 8458 timeout_id_t temp_id = un->un_reset_throttle_timeid;
8461 8459 un->un_reset_throttle_timeid = NULL;
8462 8460 mutex_exit(SD_MUTEX(un));
8463 8461 (void) untimeout(temp_id);
8464 8462 mutex_enter(SD_MUTEX(un));
8465 8463 }
8466 8464
8467 8465 /* Cancel rmw warning message timeouts */
8468 8466 if (un->un_rmw_msg_timeid != NULL) {
8469 8467 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8470 8468 un->un_rmw_msg_timeid = NULL;
8471 8469 mutex_exit(SD_MUTEX(un));
8472 8470 (void) untimeout(temp_id);
8473 8471 mutex_enter(SD_MUTEX(un));
8474 8472 }
8475 8473
8476 8474 /* Cancel any pending retry timeouts */
8477 8475 if (un->un_retry_timeid != NULL) {
8478 8476 timeout_id_t temp_id = un->un_retry_timeid;
8479 8477 un->un_retry_timeid = NULL;
8480 8478 mutex_exit(SD_MUTEX(un));
8481 8479 (void) untimeout(temp_id);
8482 8480 mutex_enter(SD_MUTEX(un));
8483 8481 }
8484 8482
8485 8483 /* Cancel any pending delayed cv broadcast timeouts */
8486 8484 if (un->un_dcvb_timeid != NULL) {
8487 8485 timeout_id_t temp_id = un->un_dcvb_timeid;
8488 8486 un->un_dcvb_timeid = NULL;
8489 8487 mutex_exit(SD_MUTEX(un));
8490 8488 (void) untimeout(temp_id);
8491 8489 mutex_enter(SD_MUTEX(un));
8492 8490 }
8493 8491
8494 8492 mutex_exit(SD_MUTEX(un));
8495 8493
8496 8494 /* There should not be any in-progress I/O so ASSERT this check */
8497 8495 ASSERT(un->un_ncmds_in_transport == 0);
8498 8496 ASSERT(un->un_ncmds_in_driver == 0);
8499 8497
8500 8498 /* Do not free the softstate if the callback routine is active */
8501 8499 sd_sync_with_callback(un);
8502 8500
8503 8501 /*
8504 8502 * Partition stats apparently are not used with removables. These would
8505 8503 * not have been created during attach, so no need to clean them up...
8506 8504 */
8507 8505 if (un->un_errstats != NULL) {
8508 8506 kstat_delete(un->un_errstats);
8509 8507 un->un_errstats = NULL;
8510 8508 }
8511 8509
8512 8510 create_errstats_failed:
8513 8511
8514 8512 if (un->un_stats != NULL) {
8515 8513 kstat_delete(un->un_stats);
8516 8514 un->un_stats = NULL;
8517 8515 }
8518 8516
8519 8517 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8520 8518 ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8521 8519
8522 8520 ddi_prop_remove_all(devi);
8523 8521 sema_destroy(&un->un_semoclose);
8524 8522 cv_destroy(&un->un_state_cv);
8525 8523
8526 8524 getrbuf_failed:
8527 8525
8528 8526 sd_free_rqs(un);
8529 8527
8530 8528 alloc_rqs_failed:
8531 8529
8532 8530 devp->sd_private = NULL;
8533 8531 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */
8534 8532
8535 8533 get_softstate_failed:
8536 8534 /*
8537 8535 * Note: the man pages are unclear as to whether or not doing a
8538 8536 * ddi_soft_state_free(sd_state, instance) is the right way to
8539 8537 * clean up after the ddi_soft_state_zalloc() if the subsequent
8540 8538 * ddi_get_soft_state() fails. The implication seems to be
8541 8539 * that the get_soft_state cannot fail if the zalloc succeeds.
8542 8540 */
8543 8541 #ifndef XPV_HVM_DRIVER
8544 8542 ddi_soft_state_free(sd_state, instance);
8545 8543 #endif /* !XPV_HVM_DRIVER */
8546 8544
8547 8545 probe_failed:
8548 8546 scsi_unprobe(devp);
8549 8547
8550 8548 return (DDI_FAILURE);
8551 8549 }
8552 8550
8553 8551
8554 8552 /*
8555 8553 * Function: sd_unit_detach
8556 8554 *
8557 8555 * Description: Performs DDI_DETACH processing for sddetach().
8558 8556 *
8559 8557 * Return Code: DDI_SUCCESS
8560 8558 * DDI_FAILURE
8561 8559 *
8562 8560 * Context: Kernel thread context
8563 8561 */
8564 8562
static int
sd_unit_detach(dev_info_t *devi)
{
	struct scsi_device	*devp;
	struct sd_lun		*un;
	int			i;
	int			tgt;
	dev_t			dev;
	dev_info_t		*pdip = ddi_get_parent(devi);
#ifndef XPV_HVM_DRIVER
	int			instance = ddi_get_instance(devi);
#endif /* !XPV_HVM_DRIVER */

	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the detach for any of the following:
	 * - Unable to get the sd_lun struct for the instance
	 * - A layered driver has an outstanding open on the instance
	 * - Another thread is already detaching this instance
	 * - Another thread is currently performing an open
	 */
	devp = ddi_get_driver_private(devi);
	if ((devp == NULL) ||
	    ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
	    (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
	    (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
		mutex_exit(&sd_detach_mutex);
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);

	/*
	 * Mark this instance as currently in a detach, to inhibit any
	 * opens from a layered driver.
	 */
	un->un_detach_count++;
	mutex_exit(&sd_detach_mutex);

	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	dev = sd_make_device(SD_DEVINFO(un));

#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif

	mutex_enter(SD_MUTEX(un));

	/*
	 * Fail the detach if there are any outstanding layered
	 * opens on this device.
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (un->un_ocmap.lyropen[i] != 0) {
			goto err_notclosed;
		}
	}

	/*
	 * Verify there are NO outstanding commands issued to this device.
	 * ie, un_ncmds_in_transport == 0.
	 * It's possible to have outstanding commands through the physio
	 * code path, even though everything's closed.
	 */
	if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
	    (un->un_direct_priority_timeid != NULL) ||
	    (un->un_state == SD_STATE_RWAIT)) {
		mutex_exit(SD_MUTEX(un));
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_dr_detach: Detach failure due to outstanding cmds\n");
		goto err_stillbusy;
	}

	/*
	 * If we have the device reserved, release the reservation.
	 */
	if ((un->un_resvd_status & SD_RESERVE) &&
	    !(un->un_resvd_status & SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * Note: sd_reserve_release sends a command to the device
		 * via the sd_ioctlcmd() path, and can sleep.
		 */
		if (sd_reserve_release(dev, SD_RELEASE) != 0) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot release reservation \n");
		}
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Untimeout any reserve recover, throttle reset, restart unit
	 * and delayed broadcast timeout threads. Protect the timeout pointer
	 * from getting nulled by their callback functions.
	 *
	 * Pattern note: each timeout id is copied and nulled under SD_MUTEX,
	 * then the mutex is dropped around untimeout(9F).  Calling untimeout
	 * while holding SD_MUTEX could deadlock against a callback that is
	 * already running and blocked trying to acquire the same mutex.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_resvd_timeid != NULL) {
		timeout_id_t temp_id = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_startstop_timeid != NULL) {
		timeout_id_t temp_id = un->un_startstop_timeid;
		un->un_startstop_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_rmw_msg_timeid != NULL) {
		timeout_id_t temp_id = un->un_rmw_msg_timeid;
		un->un_rmw_msg_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Unlike the cases above, this branch does not reacquire SD_MUTEX
	 * after untimeout: the mutex is released on both paths before
	 * sd_rmv_resv_reclaim_req() below is called without it held.
	 */
	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/* Remove any pending reservation reclaim requests for this device */
	sd_rmv_resv_reclaim_req(dev);

	mutex_enter(SD_MUTEX(un));

	/* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any active multi-host disk watch thread requests */
	if (un->un_mhd_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
		if (scsi_watch_request_terminate(un->un_mhd_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel mhd watch request\n");
			/*
			 * Note: We are returning here after having removed
			 * some driver timeouts above. This is consistent with
			 * the legacy implementation but perhaps the watch
			 * terminate call should be made with the wait flag set.
			 */
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_mhd_token = NULL;
	}

	if (un->un_swr_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
		if (scsi_watch_request_terminate(un->un_swr_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel swr watch request\n");
			/*
			 * Note: We are returning here after having removed
			 * some driver timeouts above. This is consistent with
			 * the legacy implementation but perhaps the watch
			 * terminate call should be made with the wait flag set.
			 */
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_swr_token = NULL;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Clear any scsi_reset_notifies. We clear the reset notifies
	 * if we have not registered one.
	 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
	 */
	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
	    sd_mhd_reset_notify_cb, (caddr_t)un);

	/*
	 * protect the timeout pointers from getting nulled by
	 * their callback functions during the cancellation process.
	 * In such a scenario untimeout can be invoked with a null value.
	 */
	_NOTE(NO_COMPETING_THREADS_NOW);

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_idle_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_idle_timeid;
		un->un_pm_idle_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);

		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		mutex_enter(&un->un_pm_mutex);
	}

	/*
	 * Check whether there is already a timeout scheduled for power
	 * management. If yes then don't lower the power here, that's
	 * the timeout handler's job.
	 */
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);

	} else {
		mutex_exit(&un->un_pm_mutex);
		if ((un->un_f_pm_is_enabled == TRUE) &&
		    (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
		    != DDI_SUCCESS)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_dr_detach: Lower power request failed, ignoring.\n");
			/*
			 * Fix for bug: 4297749, item # 13
			 * The above test now includes a check to see if PM is
			 * supported by this device before call
			 * pm_lower_power().
			 * Note, the following is not dead code. The call to
			 * pm_lower_power above will generate a call back into
			 * our sdpower routine which might result in a timeout
			 * handler getting activated. Therefore the following
			 * code is valid and necessary.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid != NULL) {
				timeout_id_t temp_id = un->un_pm_timeid;
				un->un_pm_timeid = NULL;
				mutex_exit(&un->un_pm_mutex);
				(void) untimeout(temp_id);
				(void) pm_idle_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
		}
	}

	/*
	 * Cleanup from the scsi_ifsetcap() calls (437868)
	 * Relocated here from above to be after the call to
	 * pm_lower_power, which was getting errors.
	 */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);

	/*
	 * Currently, tagged queuing is supported per target based by HBA.
	 * Setting this per lun instance actually sets the capability of this
	 * target in HBA, which affects those luns already attached on the
	 * same target. So during detach, we can only disable this capability
	 * only when this is the only lun left on this target. By doing
	 * this, we assume a target has the same tagged queuing capability
	 * for every lun. The condition can be removed when HBA is changed to
	 * support per lun based tagged queuing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	}

	if (un->un_f_is_fibre == FALSE) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
	}

	/*
	 * Remove any event callbacks, fibre only
	 */
	if (un->un_f_is_fibre == TRUE) {
		if ((un->un_insert_event != NULL) &&
		    (ddi_remove_event_handler(un->un_insert_cb_id) !=
		    DDI_SUCCESS)) {
			/*
			 * Note: We are returning here after having done
			 * substantial cleanup above. This is consistent
			 * with the legacy implementation but this may not
			 * be the right thing to do.
			 */
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel insert event\n");
			goto err_remove_event;
		}
		un->un_insert_event = NULL;

		if ((un->un_remove_event != NULL) &&
		    (ddi_remove_event_handler(un->un_remove_cb_id) !=
		    DDI_SUCCESS)) {
			/*
			 * Note: We are returning here after having done
			 * substantial cleanup above. This is consistent
			 * with the legacy implementation but this may not
			 * be the right thing to do.
			 */
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel remove event\n");
			goto err_remove_event;
		}
		un->un_remove_event = NULL;
	}

	/* Do not free the softstate if the callback routine is active */
	sd_sync_with_callback(un);

	cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
	cmlb_free_handle(&un->un_cmlbhandle);

	/*
	 * Hold the detach mutex here, to make sure that no other threads ever
	 * can access a (partially) freed soft state structure.
	 */
	mutex_enter(&sd_detach_mutex);

	/*
	 * Clean up the soft state struct.
	 * Cleanup is done in reverse order of allocs/inits.
	 * At this point there should be no competing threads anymore.
	 */

	scsi_fm_fini(devp);

	/*
	 * Deallocate memory for SCSI FMA.
	 */
	kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));

	/*
	 * Unregister and free device id if it was not registered
	 * by the transport.
	 */
	if (un->un_f_devid_transport_defined == FALSE)
		ddi_devid_unregister(devi);

	/*
	 * free the devid structure if allocated before (by ddi_devid_init()
	 * or ddi_devid_get()).
	 */
	if (un->un_devid) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/*
	 * Destroy wmap cache if it exists.
	 */
	if (un->un_wm_cache != NULL) {
		kmem_cache_destroy(un->un_wm_cache);
		un->un_wm_cache = NULL;
	}

	/*
	 * kstat cleanup is done in detach for all device types (4363169).
	 * We do not want to fail detach if the device kstats are not deleted
	 * since there is a confusion about the devo_refcnt for the device.
	 * We just delete the kstats and let detach complete successfully.
	 */
	if (un->un_stats != NULL) {
		kstat_delete(un->un_stats);
		un->un_stats = NULL;
	}
	if (un->un_errstats != NULL) {
		kstat_delete(un->un_errstats);
		un->un_errstats = NULL;
	}

	/* Remove partition stats */
	if (un->un_f_pkstats_enabled) {
		for (i = 0; i < NSDMAP; i++) {
			if (un->un_pstats[i] != NULL) {
				kstat_delete(un->un_pstats[i]);
				un->un_pstats[i] = NULL;
			}
		}
	}

	/* Remove xbuf registration */
	ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
	ddi_xbuf_attr_destroy(un->un_xbuf_attr);

	/* Remove driver properties */
	ddi_prop_remove_all(devi);

	mutex_destroy(&un->un_pm_mutex);
	cv_destroy(&un->un_pm_busy_cv);

	cv_destroy(&un->un_wcc_cv);

	/* Open/close semaphore */
	sema_destroy(&un->un_semoclose);

	/* Removable media condvar. */
	cv_destroy(&un->un_state_cv);

	/* Suspend/resume condvar. */
	cv_destroy(&un->un_suspend_cv);
	cv_destroy(&un->un_disk_busy_cv);

	sd_free_rqs(un);

	/* Free up soft state */
	devp->sd_private = NULL;

	bzero(un, sizeof (struct sd_lun));
#ifndef XPV_HVM_DRIVER
	ddi_soft_state_free(sd_state, instance);
#endif /* !XPV_HVM_DRIVER */

	mutex_exit(&sd_detach_mutex);

	/* This frees up the INQUIRY data associated with the device. */
	scsi_unprobe(devp);

	/*
	 * After successfully detaching an instance, we update the information
	 * of how many luns have been attached in the relative target and
	 * controller for parallel SCSI. This information is used when sd tries
	 * to set the tagged queuing capability in HBA.
	 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
	 * check if the device is parallel SCSI. However, we don't need to
	 * check here because we've already checked during attach. No device
	 * that is not parallel SCSI is in the chain.
	 */
	if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
	}

	return (DDI_SUCCESS);

err_notclosed:
	mutex_exit(SD_MUTEX(un));

err_stillbusy:
	_NOTE(NO_COMPETING_THREADS_NOW);

err_remove_event:
	/*
	 * Undo the "detach in progress" marker taken at entry so that a
	 * later detach attempt (or a layered open) can proceed.
	 */
	mutex_enter(&sd_detach_mutex);
	un->un_detach_count--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
	return (DDI_FAILURE);
}
9044 9042
9045 9043
9046 9044 /*
9047 9045 * Function: sd_create_errstats
9048 9046 *
9049 9047 * Description: This routine instantiates the device error stats.
9050 9048 *
9051 9049 * Note: During attach the stats are instantiated first so they are
9052 9050 * available for attach-time routines that utilize the driver
9053 9051 * iopath to send commands to the device. The stats are initialized
9054 9052 * separately so data obtained during some attach-time routines is
9055 9053 * available. (4362483)
9056 9054 *
9057 9055 * Arguments: un - driver soft state (unit) structure
9058 9056 * instance - driver instance
9059 9057 *
9060 9058 * Context: Kernel thread context
9061 9059 */
9062 9060
9063 9061 static void
9064 9062 sd_create_errstats(struct sd_lun *un, int instance)
9065 9063 {
9066 9064 struct sd_errstats *stp;
9067 9065 char kstatmodule_err[KSTAT_STRLEN];
9068 9066 char kstatname[KSTAT_STRLEN];
9069 9067 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));
9070 9068
9071 9069 ASSERT(un != NULL);
9072 9070
9073 9071 if (un->un_errstats != NULL) {
9074 9072 return;
9075 9073 }
9076 9074
9077 9075 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
9078 9076 "%serr", sd_label);
9079 9077 (void) snprintf(kstatname, sizeof (kstatname),
9080 9078 "%s%d,err", sd_label, instance);
9081 9079
9082 9080 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
9083 9081 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);
9084 9082
9085 9083 if (un->un_errstats == NULL) {
9086 9084 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
9087 9085 "sd_create_errstats: Failed kstat_create\n");
9088 9086 return;
9089 9087 }
9090 9088
9091 9089 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9092 9090 kstat_named_init(&stp->sd_softerrs, "Soft Errors",
9093 9091 KSTAT_DATA_UINT32);
9094 9092 kstat_named_init(&stp->sd_harderrs, "Hard Errors",
9095 9093 KSTAT_DATA_UINT32);
9096 9094 kstat_named_init(&stp->sd_transerrs, "Transport Errors",
9097 9095 KSTAT_DATA_UINT32);
9098 9096 kstat_named_init(&stp->sd_vid, "Vendor",
9099 9097 KSTAT_DATA_CHAR);
9100 9098 kstat_named_init(&stp->sd_pid, "Product",
9101 9099 KSTAT_DATA_CHAR);
9102 9100 kstat_named_init(&stp->sd_revision, "Revision",
9103 9101 KSTAT_DATA_CHAR);
9104 9102 kstat_named_init(&stp->sd_serial, "Serial No",
9105 9103 KSTAT_DATA_CHAR);
9106 9104 kstat_named_init(&stp->sd_capacity, "Size",
9107 9105 KSTAT_DATA_ULONGLONG);
9108 9106 kstat_named_init(&stp->sd_rq_media_err, "Media Error",
9109 9107 KSTAT_DATA_UINT32);
9110 9108 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
9111 9109 KSTAT_DATA_UINT32);
9112 9110 kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
9113 9111 KSTAT_DATA_UINT32);
9114 9112 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
9115 9113 KSTAT_DATA_UINT32);
9116 9114 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
9117 9115 KSTAT_DATA_UINT32);
9118 9116 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
9119 9117 KSTAT_DATA_UINT32);
9120 9118
9121 9119 un->un_errstats->ks_private = un;
9122 9120 un->un_errstats->ks_update = nulldev;
9123 9121
9124 9122 kstat_install(un->un_errstats);
9125 9123 }
9126 9124
9127 9125
9128 9126 /*
9129 9127 * Function: sd_set_errstats
9130 9128 *
9131 9129 * Description: This routine sets the value of the vendor id, product id,
9132 9130 * revision, serial number, and capacity device error stats.
9133 9131 *
9134 9132 * Note: During attach the stats are instantiated first so they are
9135 9133 * available for attach-time routines that utilize the driver
9136 9134 * iopath to send commands to the device. The stats are initialized
9137 9135 * separately so data obtained during some attach-time routines is
9138 9136 * available. (4362483)
9139 9137 *
9140 9138 * Arguments: un - driver soft state (unit) structure
9141 9139 *
9142 9140 * Context: Kernel thread context
9143 9141 */
9144 9142
static void
sd_set_errstats(struct sd_lun *un)
{
	struct sd_errstats *stp;
	char *sn;

	ASSERT(un != NULL);
	ASSERT(un->un_errstats != NULL);
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	ASSERT(stp != NULL);
	/*
	 * INQUIRY vid/pid/revision are fixed-width, space-padded fields that
	 * are not NUL-terminated; strncpy with the exact field width (8/16/4)
	 * copies them into the fixed-width kstat char values.
	 * NOTE(review): value.c appears to be wide enough that the 8- and
	 * 4-byte copies leave it NUL-terminated, but the 16-byte pid copy
	 * may fill it entirely -- consumers presumably treat these as
	 * fixed-width, not C strings; confirm against kstat_named(9S).
	 */
	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
	(void) strncpy(stp->sd_revision.value.c,
	    un->un_sd->sd_inq->inq_revision, 4);

	/*
	 * All the errstats are persistent across detach/attach,
	 * so reset all the errstats here in case of the hot
	 * replacement of disk drives, except for not changed
	 * Sun qualified drives.
	 * (A drive is considered "unchanged" only when it is Sun qualified,
	 * i.e. "SUN" at bytes 9-11 of the pid, AND its INQUIRY serial number
	 * matches the serial number persisted in the kstat.)
	 */
	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
		stp->sd_softerrs.value.ui32 = 0;
		stp->sd_harderrs.value.ui32 = 0;
		stp->sd_transerrs.value.ui32 = 0;
		stp->sd_rq_media_err.value.ui32 = 0;
		stp->sd_rq_ntrdy_err.value.ui32 = 0;
		stp->sd_rq_nodev_err.value.ui32 = 0;
		stp->sd_rq_recov_err.value.ui32 = 0;
		stp->sd_rq_illrq_err.value.ui32 = 0;
		stp->sd_rq_pfa_err.value.ui32 = 0;
	}

	/*
	 * Set the "Serial No" kstat for Sun qualified drives (indicated by
	 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
	 * (4376302))
	 */
	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
		    sizeof (SD_INQUIRY(un)->inq_serial));
	} else {
		/*
		 * Set the "Serial No" kstat for non-Sun qualified drives,
		 * taken from the inquiry-serial-no property if the HBA or
		 * devid code published one.  If the property is absent the
		 * kstat value is simply left as-is.
		 */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
		    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
		    INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
			(void) strlcpy(stp->sd_serial.value.c, sn,
			    sizeof (stp->sd_serial.value.c));
			ddi_prop_free(sn);
		}
	}

	if (un->un_f_blockcount_is_valid != TRUE) {
		/*
		 * Set capacity error stat to 0 for no media. This ensures
		 * a valid capacity is displayed in response to 'iostat -E'
		 * when no media is present in the device.
		 */
		stp->sd_capacity.value.ui64 = 0;
	} else {
		/*
		 * Multiply un_blockcount by un->un_sys_blocksize to get
		 * capacity.
		 *
		 * Note: for non-512 blocksize devices "un_blockcount" has been
		 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
		 * (un_tgt_blocksize / un->un_sys_blocksize).
		 */
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
	}
}
9221 9219
9222 9220
9223 9221 /*
9224 9222 * Function: sd_set_pstats
9225 9223 *
9226 9224 * Description: This routine instantiates and initializes the partition
9227 9225 * stats for each partition with more than zero blocks.
9228 9226 * (4363169)
9229 9227 *
9230 9228 * Arguments: un - driver soft state (unit) structure
9231 9229 *
9232 9230 * Context: Kernel thread context
9233 9231 */
9234 9232
9235 9233 static void
9236 9234 sd_set_pstats(struct sd_lun *un)
9237 9235 {
9238 9236 char kstatname[KSTAT_STRLEN];
9239 9237 int instance;
9240 9238 int i;
9241 9239 diskaddr_t nblks = 0;
9242 9240 char *partname = NULL;
9243 9241
9244 9242 ASSERT(un != NULL);
9245 9243
9246 9244 instance = ddi_get_instance(SD_DEVINFO(un));
9247 9245
9248 9246 /* Note:x86: is this a VTOC8/VTOC16 difference? */
9249 9247 for (i = 0; i < NSDMAP; i++) {
9250 9248
9251 9249 if (cmlb_partinfo(un->un_cmlbhandle, i,
9252 9250 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9253 9251 continue;
9254 9252 mutex_enter(SD_MUTEX(un));
9255 9253
9256 9254 if ((un->un_pstats[i] == NULL) &&
9257 9255 (nblks != 0)) {
9258 9256
9259 9257 (void) snprintf(kstatname, sizeof (kstatname),
9260 9258 "%s%d,%s", sd_label, instance,
9261 9259 partname);
9262 9260
9263 9261 un->un_pstats[i] = kstat_create(sd_label,
9264 9262 instance, kstatname, "partition", KSTAT_TYPE_IO,
9265 9263 1, KSTAT_FLAG_PERSISTENT);
9266 9264 if (un->un_pstats[i] != NULL) {
9267 9265 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9268 9266 kstat_install(un->un_pstats[i]);
9269 9267 }
9270 9268 }
9271 9269 mutex_exit(SD_MUTEX(un));
9272 9270 }
9273 9271 }
9274 9272
9275 9273
9276 9274 #if (defined(__fibre))
9277 9275 /*
9278 9276 * Function: sd_init_event_callbacks
9279 9277 *
9280 9278 * Description: This routine initializes the insertion and removal event
9281 9279 * callbacks. (fibre only)
9282 9280 *
9283 9281 * Arguments: un - driver soft state (unit) structure
9284 9282 *
9285 9283 * Context: Kernel thread context
9286 9284 */
9287 9285
9288 9286 static void
9289 9287 sd_init_event_callbacks(struct sd_lun *un)
9290 9288 {
9291 9289 ASSERT(un != NULL);
9292 9290
9293 9291 if ((un->un_insert_event == NULL) &&
9294 9292 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9295 9293 &un->un_insert_event) == DDI_SUCCESS)) {
9296 9294 /*
9297 9295 * Add the callback for an insertion event
9298 9296 */
9299 9297 (void) ddi_add_event_handler(SD_DEVINFO(un),
9300 9298 un->un_insert_event, sd_event_callback, (void *)un,
9301 9299 &(un->un_insert_cb_id));
9302 9300 }
9303 9301
9304 9302 if ((un->un_remove_event == NULL) &&
9305 9303 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9306 9304 &un->un_remove_event) == DDI_SUCCESS)) {
9307 9305 /*
9308 9306 * Add the callback for a removal event
9309 9307 */
9310 9308 (void) ddi_add_event_handler(SD_DEVINFO(un),
9311 9309 un->un_remove_event, sd_event_callback, (void *)un,
9312 9310 &(un->un_remove_cb_id));
9313 9311 }
9314 9312 }
9315 9313
9316 9314
9317 9315 /*
9318 9316 * Function: sd_event_callback
9319 9317 *
9320 9318 * Description: This routine handles insert/remove events (photon). The
 * state is changed to OFFLINE which can be used to suppress
9322 9320 * error msgs. (fibre only)
9323 9321 *
9324 9322 * Arguments: un - driver soft state (unit) structure
9325 9323 *
9326 9324 * Context: Callout thread context
9327 9325 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
	if (event == un->un_insert_event) {
		/* Insertion: bring the device back out of the OFFLINE state */
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
		mutex_enter(SD_MUTEX(un));
		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_last_state != SD_STATE_SUSPENDED) {
				un->un_state = un->un_last_state;
			} else {
				/*
				 * We have gone through SUSPEND/RESUME while
				 * we were offline. Restore the last state
				 */
				un->un_state = un->un_save_state;
			}
		}
		mutex_exit(SD_MUTEX(un));

		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
	} else if (event == un->un_remove_event) {
		/* Removal: transition to OFFLINE to quiet error messages */
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
		mutex_enter(SD_MUTEX(un));
		/*
		 * We need to handle an event callback that occurs during
		 * the suspend operation, since we don't prevent it.
		 */
		if (un->un_state != SD_STATE_OFFLINE) {
			if (un->un_state != SD_STATE_SUSPENDED) {
				New_state(un, SD_STATE_OFFLINE);
			} else {
				/*
				 * While SUSPENDED only record the pending
				 * state; resume will pick it up.
				 */
				un->un_last_state = SD_STATE_OFFLINE;
			}
		}
		mutex_exit(SD_MUTEX(un));
	} else {
		/* Neither cookie matched; log and ignore the event. */
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "!Unknown event\n");
	}

}
9374 9372 #endif
9375 9373
9376 9374 /*
9377 9375 * Function: sd_cache_control()
9378 9376 *
9379 9377 * Description: This routine is the driver entry point for setting
9380 9378 * read and write caching by modifying the WCE (write cache
9381 9379 * enable) and RCD (read cache disable) bits of mode
9382 9380 * page 8 (MODEPAGE_CACHING).
9383 9381 *
9384 9382 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9385 9383 * structure for this target.
9386 9384 * rcd_flag - flag for controlling the read cache
9387 9385 * wce_flag - flag for controlling the write cache
9388 9386 *
9389 9387 * Return Code: EIO
9390 9388 * code returned by sd_send_scsi_MODE_SENSE and
9391 9389 * sd_send_scsi_MODE_SELECT
9392 9390 *
9393 9391 * Context: Kernel Thread
9394 9392 */
9395 9393
static int
sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
{
	struct mode_caching	*mode_caching_page;
	uchar_t			*header;
	size_t			buflen;
	int			hdrlen;
	int			bd_len;
	int			rval = 0;
	struct mode_header_grp2	*mhp;
	struct sd_lun		*un;
	int			status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Do a test unit ready, otherwise a mode sense may not work if this
	 * is the first command sent to the device after boot.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	/* ATAPI uses the 10-byte (group 2) mode header, SCSI the 6-byte. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		hdrlen = MODE_HEADER_LENGTH_GRP2;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
	}

	/*
	 * Allocate memory for the retrieved mode page and its headers.  Set
	 * a pointer to the page itself.  Use mode_cache_scsi3 to insure
	 * we get all of the mode sense data otherwise, the mode select
	 * will fail.  mode_cache_scsi3 is a superset of mode_caching.
	 */
	buflen = hdrlen + MODE_BLK_DESC_LENGTH +
	    sizeof (struct mode_cache_scsi3);

	header = kmem_zalloc(buflen, KM_SLEEP);

	/* Get the information from the device. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	} else {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	}

	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sd_cache_control: Mode Sense Failed\n");
		goto mode_sense_failed;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mhp	= (struct mode_header_grp2 *)header;
		bd_len  = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len  = ((struct mode_header *)header)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/* Malformed response; treat as an I/O error. */
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "sd_cache_control: Mode Sense returned invalid block "
		    "descriptor length\n");
		rval = EIO;
		goto mode_sense_failed;
	}

	/* The caching page follows the header and block descriptor(s). */
	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_cache_control: Mode Sense caching page code mismatch "
		    "%d\n", mode_caching_page->mode_page.code);
		rval = EIO;
		goto mode_sense_failed;
	}

	/*
	 * Check the relevant bits on successful mode sense.
	 * Only issue a mode select if at least one of the requested
	 * flags disagrees with the device's current setting.
	 */
	if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
	    (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
	    (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
	    (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {

		size_t sbuflen;
		uchar_t save_pg;

		/*
		 * Construct select buffer length based on the
		 * length of the sense data returned.
		 */
		sbuflen = hdrlen + bd_len +
		    sizeof (struct mode_page) +
		    (int)mode_caching_page->mode_page.length;

		/*
		 * Set the caching bits as requested.
		 * Note: RCD is "read cache disable", so enabling the read
		 * cache means clearing the bit; WCE is the opposite sense.
		 */
		if (rcd_flag == SD_CACHE_ENABLE)
			mode_caching_page->rcd = 0;
		else if (rcd_flag == SD_CACHE_DISABLE)
			mode_caching_page->rcd = 1;

		if (wce_flag == SD_CACHE_ENABLE)
			mode_caching_page->wce = 1;
		else if (wce_flag == SD_CACHE_DISABLE)
			mode_caching_page->wce = 0;

		/*
		 * Save the page if the mode sense says the
		 * drive supports it.
		 */
		save_pg = mode_caching_page->mode_page.ps ?
		    SD_SAVE_PAGE : SD_DONTSAVE_PAGE;

		/* Clear reserved bits before mode select. */
		mode_caching_page->mode_page.ps = 0;

		/*
		 * Clear out mode header for mode select.
		 * The rest of the retrieved page will be reused.
		 */
		bzero(header, hdrlen);

		/* Re-establish the block descriptor length in the header. */
		if (un->un_f_cfg_is_atapi == TRUE) {
			mhp = (struct mode_header_grp2 *)header;
			mhp->bdesc_length_hi = bd_len >> 8;
			mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff;
		} else {
			((struct mode_header *)header)->bdesc_length = bd_len;
		}

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		/* Issue mode select to change the cache settings */
		if (un->un_f_cfg_is_atapi == TRUE) {
			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header,
			    sbuflen, save_pg, SD_PATH_DIRECT);
		} else {
			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
			    sbuflen, save_pg, SD_PATH_DIRECT);
		}

	}


mode_sense_failed:

	kmem_free(header, buflen);

	/* EIO gets a STATUS_CHECK assessment; other errors are ignored. */
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	return (rval);
}
9562 9560
9563 9561
9564 9562 /*
9565 9563 * Function: sd_get_write_cache_enabled()
9566 9564 *
9567 9565 * Description: This routine is the driver entry point for determining if
9568 9566 * write caching is enabled. It examines the WCE (write cache
9569 9567 * enable) bits of mode page 8 (MODEPAGE_CACHING).
9570 9568 *
9571 9569 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9572 9570 * structure for this target.
9573 9571 * is_enabled - pointer to int where write cache enabled state
9574 9572 * is returned (non-zero -> write cache enabled)
9575 9573 *
9576 9574 *
9577 9575 * Return Code: EIO
9578 9576 * code returned by sd_send_scsi_MODE_SENSE
9579 9577 *
9580 9578 * Context: Kernel Thread
9581 9579 *
9582 9580 * NOTE: If ioctl is added to disable write cache, this sequence should
9583 9581 * be followed so that no locking is required for accesses to
9584 9582 * un->un_f_write_cache_enabled:
9585 9583 * do mode select to clear wce
9586 9584 * do synchronize cache to flush cache
9587 9585 * set un->un_f_write_cache_enabled = FALSE
9588 9586 *
9589 9587 * Conversely, an ioctl to enable the write cache should be done
9590 9588 * in this order:
9591 9589 * set un->un_f_write_cache_enabled = TRUE
9592 9590 * do mode select to set wce
9593 9591 */
9594 9592
static int
sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
{
	struct mode_caching	*mode_caching_page;
	uchar_t			*header;
	size_t			buflen;
	int			hdrlen;
	int			bd_len;
	int			rval = 0;
	struct sd_lun		*un;
	int			status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(is_enabled != NULL);

	/* in case of error, flag as enabled */
	*is_enabled = TRUE;

	/*
	 * Do a test unit ready, otherwise a mode sense may not work if this
	 * is the first command sent to the device after boot.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	/* ATAPI uses the 10-byte (group 2) mode header, SCSI the 6-byte. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		hdrlen = MODE_HEADER_LENGTH_GRP2;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
	}

	/*
	 * Allocate memory for the retrieved mode page and its headers.  Set
	 * a pointer to the page itself.
	 */
	buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching);
	header = kmem_zalloc(buflen, KM_SLEEP);

	/* Get the information from the device. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	} else {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	}

	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sd_get_write_cache_enabled: Mode Sense Failed\n");
		goto mode_sense_failed;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2	*mhp;
		mhp	= (struct mode_header_grp2 *)header;
		bd_len  = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len  = ((struct mode_header *)header)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/* FMA should make upset complain here */
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
		    "block descriptor length\n");
		rval = EIO;
		goto mode_sense_failed;
	}

	/* The caching page follows the header and block descriptor(s). */
	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		/* FMA could make upset complain here */
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_get_write_cache_enabled: Mode Sense caching page "
		    "code mismatch %d\n", mode_caching_page->mode_page.code);
		rval = EIO;
		goto mode_sense_failed;
	}
	/* Report the device's current WCE (write cache enable) bit. */
	*is_enabled = mode_caching_page->wce;

mode_sense_failed:
	if (rval == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	} else if (rval == EIO) {
		/*
		 * Some disks do not support mode sense(6), we
		 * should ignore this kind of error(sense key is
		 * 0x5 - illegal request).
		 */
		uint8_t *sensep;
		int senlen;

		sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
		senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
		    ssc->ssc_uscsi_cmd->uscsi_rqresid);

		if (senlen > 0 &&
		    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		} else {
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		}
	} else {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	kmem_free(header, buflen);
	return (rval);
}
9713 9711
9714 9712 /*
9715 9713 * Function: sd_get_nv_sup()
9716 9714 *
9717 9715 * Description: This routine is the driver entry point for
9718 9716 * determining whether non-volatile cache is supported. This
9719 9717 * determination process works as follows:
9720 9718 *
9721 9719 * 1. sd first queries sd.conf on whether
9722 9720 * suppress_cache_flush bit is set for this device.
9723 9721 *
9724 9722 * 2. if not there, then queries the internal disk table.
9725 9723 *
9726 9724 * 3. if either sd.conf or internal disk table specifies
9727 9725 * cache flush be suppressed, we don't bother checking
9728 9726 * NV_SUP bit.
9729 9727 *
9730 9728 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
9731 9729 * the optional INQUIRY VPD page 0x86. If the device
9732 9730 * supports VPD page 0x86, sd examines the NV_SUP
9733 9731 * (non-volatile cache support) bit in the INQUIRY VPD page
9734 9732 * 0x86:
9735 9733 * o If NV_SUP bit is set, sd assumes the device has a
9736 9734 * non-volatile cache and set the
9737 9735 * un_f_sync_nv_supported to TRUE.
9738 9736 * o Otherwise cache is not non-volatile,
9739 9737 * un_f_sync_nv_supported is set to FALSE.
9740 9738 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
9742 9740 *
9743 9741 * Return Code:
9744 9742 *
9745 9743 * Context: Kernel Thread
9746 9744 */
9747 9745
static void
sd_get_nv_sup(sd_ssc_t *ssc)
{
	int		rval		= 0;
	uchar_t		*inq86		= NULL;
	size_t		inq86_len	= MAX_INQUIRY_SIZE;
	size_t		inq86_resid	= 0;
	struct		dk_callback *dkc;
	struct		sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Be conservative on the device's support of
	 * SYNC_NV bit: un_f_sync_nv_supported is
	 * initialized to be false.
	 */
	un->un_f_sync_nv_supported = FALSE;

	/*
	 * If either sd.conf or internal disk table
	 * specifies cache flush be suppressed, then
	 * we don't bother checking NV_SUP bit.
	 */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (sd_check_vpd_page_support(ssc) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
		/* Drop the mutex across the blocking INQUIRY. */
		mutex_exit(SD_MUTEX(un));
		/* collect page 86 data if available */
		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);

		rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
		    0x01, 0x86, &inq86_resid);

		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_get_nv_sup: \
successfully get VPD page: %x \
PAGE LENGTH: %x BYTE 6: %x\n",
			    inq86[1], inq86[3], inq86[6]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * check the value of NV_SUP bit: only if the device
			 * reports NV_SUP bit to be 1, the
			 * un_f_sync_nv_supported bit will be set to true.
			 */
			if (inq86[6] & SD_VPD_NV_SUP) {
				un->un_f_sync_nv_supported = TRUE;
			}
			mutex_exit(SD_MUTEX(un));
		} else if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		kmem_free(inq86, inq86_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Send a SYNC CACHE command to check whether
	 * SYNC_NV bit is supported. This command should have
	 * un_f_sync_nv_supported set to correct value.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_sync_nv_supported) {
		mutex_exit(SD_MUTEX(un));
		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
		dkc->dkc_flag = FLUSH_VOLATILE;
		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);

		/*
		 * Send a TEST UNIT READY command to the device. This should
		 * clear any outstanding UNIT ATTENTION that may be present.
		 */
		rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
		if (rval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		kmem_free(dkc, sizeof (struct dk_callback));
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
un_f_suppress_cache_flush is set to %d\n",
	    un->un_f_suppress_cache_flush);
}
9845 9843
9846 9844 /*
9847 9845 * Function: sd_make_device
9848 9846 *
9849 9847 * Description: Utility routine to return the Solaris device number from
9850 9848 * the data in the device's dev_info structure.
9851 9849 *
9852 9850 * Return Code: The Solaris device number
9853 9851 *
9854 9852 * Context: Any
9855 9853 */
9856 9854
9857 9855 static dev_t
9858 9856 sd_make_device(dev_info_t *devi)
9859 9857 {
9860 9858 return (makedevice(ddi_driver_major(devi),
9861 9859 ddi_get_instance(devi) << SDUNIT_SHIFT));
9862 9860 }
9863 9861
9864 9862
9865 9863 /*
9866 9864 * Function: sd_pm_entry
9867 9865 *
9868 9866 * Description: Called at the start of a new command to manage power
9869 9867 * and busy status of a device. This includes determining whether
9870 9868 * the current power state of the device is sufficient for
9871 9869 * performing the command or whether it must be changed.
9872 9870 * The PM framework is notified appropriately.
9873 9871 * Only with a return status of DDI_SUCCESS will the
9874 9872 * component be busy to the framework.
9875 9873 *
9876 9874 * All callers of sd_pm_entry must check the return status
 *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
9878 9876 * of DDI_FAILURE indicates the device failed to power up.
9879 9877 * In this case un_pm_count has been adjusted so the result
9880 9878 * on exit is still powered down, ie. count is less than 0.
9881 9879 * Calling sd_pm_exit with this count value hits an ASSERT.
9882 9880 *
9883 9881 * Return Code: DDI_SUCCESS or DDI_FAILURE
9884 9882 *
9885 9883 * Context: Kernel thread context.
9886 9884 */
9887 9885
static int
sd_pm_entry(struct sd_lun *un)
{
	int return_status = DDI_SUCCESS;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");

	/* Fast path: nothing to do when PM is not enabled for this unit. */
	if (un->un_f_pm_is_enabled == FALSE) {
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_entry: exiting, PM not enabled\n");
		return (return_status);
	}

	/*
	 * Just increment a counter if PM is enabled. On the transition from
	 * 0 ==> 1, mark the device as busy.  The iodone side will decrement
	 * the count with each IO and mark the device as idle when the count
	 * hits 0.
	 *
	 * If the count is less than 0 the device is powered down. If a powered
	 * down device is successfully powered up then the count must be
	 * incremented to reflect the power up. Note that it'll get incremented
	 * a second time to become busy.
	 *
	 * Because the following has the potential to change the device state
	 * and must release the un_pm_mutex to do so, only one thread can be
	 * allowed through at a time.
	 */

	mutex_enter(&un->un_pm_mutex);
	/* Serialize: wait until no other thread is mid power transition. */
	while (un->un_pm_busy == TRUE) {
		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
	}
	un->un_pm_busy = TRUE;

	if (un->un_pm_count < 1) {

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");

		/*
		 * Indicate we are now busy so the framework won't attempt to
		 * power down the device. This call will only fail if either
		 * we passed a bad component number or the device has no
		 * components. Neither of these should ever happen.
		 */
		mutex_exit(&un->un_pm_mutex);
		return_status = pm_busy_component(SD_DEVINFO(un), 0);
		ASSERT(return_status == DDI_SUCCESS);

		mutex_enter(&un->un_pm_mutex);

		if (un->un_pm_count < 0) {
			/* Device is powered down; bring it up. */
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: power up component\n");

			/*
			 * pm_raise_power will cause sdpower to be called
			 * which brings the device power level to the
			 * desired state, If successful, un_pm_count and
			 * un_power_level will be updated appropriately.
			 */
			return_status = pm_raise_power(SD_DEVINFO(un), 0,
			    SD_PM_STATE_ACTIVE(un));

			mutex_enter(&un->un_pm_mutex);

			if (return_status != DDI_SUCCESS) {
				/*
				 * Power up failed.
				 * Idle the device and adjust the count
				 * so the result on exit is that we're
				 * still powered down, ie. count is less than 0.
				 */
				SD_TRACE(SD_LOG_IO_PM, un,
				    "sd_pm_entry: power up failed,"
				    " idle the component\n");

				(void) pm_idle_component(SD_DEVINFO(un), 0);
				un->un_pm_count--;
			} else {
				/*
				 * Device is powered up, verify the
				 * count is non-negative.
				 * This is debug only.
				 */
				ASSERT(un->un_pm_count == 0);
			}
		}

		if (return_status == DDI_SUCCESS) {
			/*
			 * For performance, now that the device has been tagged
			 * as busy, and it's known to be powered up, update the
			 * chain types to use jump tables that do not include
			 * pm. This significantly lowers the overhead and
			 * therefore improves performance.
			 */

			mutex_exit(&un->un_pm_mutex);
			mutex_enter(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: changing uscsi_chain_type from %d\n",
			    un->un_uscsi_chain_type);

			if (un->un_f_non_devbsize_supported) {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_RMMEDIA_NO_PM;
			} else {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_DISK_NO_PM;
			}
			un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;

			SD_TRACE(SD_LOG_IO_PM, un,
			    "	changed uscsi_chain_type to	%d\n",
			    un->un_uscsi_chain_type);
			mutex_exit(SD_MUTEX(un));
			mutex_enter(&un->un_pm_mutex);

			if (un->un_pm_idle_timeid == NULL) {
				/* 300 ms. */
				un->un_pm_idle_timeid =
				    timeout(sd_pm_idletimeout_handler, un,
				    (drv_usectohz((clock_t)300000)));
				/*
				 * Include an extra call to busy which keeps the
				 * device busy with-respect-to the PM layer
				 * until the timer fires, at which time it'll
				 * get the extra idle call.
				 */
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			}
		}
	}
	/* Release the single-threading gate and wake the next waiter. */
	un->un_pm_busy = FALSE;
	/* Next... */
	cv_signal(&un->un_pm_busy_cv);

	un->un_pm_count++;

	SD_TRACE(SD_LOG_IO_PM, un,
	    "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

	mutex_exit(&un->un_pm_mutex);

	return (return_status);
}
10040 10038
10041 10039
10042 10040 /*
10043 10041 * Function: sd_pm_exit
10044 10042 *
10045 10043 * Description: Called at the completion of a command to manage busy
10046 10044 * status for the device. If the device becomes idle the
10047 10045 * PM framework is notified.
10048 10046 *
10049 10047 * Context: Kernel thread context
10050 10048 */
10051 10049
10052 10050 static void
10053 10051 sd_pm_exit(struct sd_lun *un)
10054 10052 {
10055 10053 ASSERT(!mutex_owned(SD_MUTEX(un)));
10056 10054 ASSERT(!mutex_owned(&un->un_pm_mutex));
10057 10055
10058 10056 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");
10059 10057
10060 10058 /*
10061 10059 * After attach the following flag is only read, so don't
10062 10060 * take the penalty of acquiring a mutex for it.
10063 10061 */
10064 10062 if (un->un_f_pm_is_enabled == TRUE) {
10065 10063
10066 10064 mutex_enter(&un->un_pm_mutex);
10067 10065 un->un_pm_count--;
10068 10066
10069 10067 SD_TRACE(SD_LOG_IO_PM, un,
10070 10068 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
10071 10069
10072 10070 ASSERT(un->un_pm_count >= 0);
10073 10071 if (un->un_pm_count == 0) {
10074 10072 mutex_exit(&un->un_pm_mutex);
10075 10073
10076 10074 SD_TRACE(SD_LOG_IO_PM, un,
10077 10075 "sd_pm_exit: idle component\n");
10078 10076
10079 10077 (void) pm_idle_component(SD_DEVINFO(un), 0);
10080 10078
10081 10079 } else {
10082 10080 mutex_exit(&un->un_pm_mutex);
10083 10081 }
10084 10082 }
10085 10083
10086 10084 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
10087 10085 }
10088 10086
10089 10087
10090 10088 /*
10091 10089 * Function: sdopen
10092 10090 *
10093 10091 * Description: Driver's open(9e) entry point function.
10094 10092 *
10095 10093 * Arguments: dev_i - pointer to device number
10096 10094 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10097 10095 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10098 10096 * cred_p - user credential pointer
10099 10097 *
10100 10098 * Return Code: EINVAL
10101 10099 * ENXIO
10102 10100 * EIO
10103 10101 * EROFS
10104 10102 * EBUSY
10105 10103 *
10106 10104 * Context: Kernel thread context
10107 10105 */
10108 10106 /* ARGSUSED */
10109 10107 static int
10110 10108 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
10111 10109 {
10112 10110 struct sd_lun *un;
10113 10111 int nodelay;
10114 10112 int part;
10115 10113 uint64_t partmask;
10116 10114 int instance;
10117 10115 dev_t dev;
10118 10116 int rval = EIO;
10119 10117 diskaddr_t nblks = 0;
10120 10118 diskaddr_t label_cap;
10121 10119
10122 10120 /* Validate the open type */
10123 10121 if (otyp >= OTYPCNT) {
10124 10122 return (EINVAL);
10125 10123 }
10126 10124
10127 10125 dev = *dev_p;
10128 10126 instance = SDUNIT(dev);
10129 10127 mutex_enter(&sd_detach_mutex);
10130 10128
10131 10129 /*
10132 10130 * Fail the open if there is no softstate for the instance, or
10133 10131 * if another thread somewhere is trying to detach the instance.
10134 10132 */
10135 10133 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
10136 10134 (un->un_detach_count != 0)) {
10137 10135 mutex_exit(&sd_detach_mutex);
10138 10136 /*
10139 10137 * The probe cache only needs to be cleared when open (9e) fails
10140 10138 * with ENXIO (4238046).
10141 10139 */
10142 10140 /*
10143 10141 * un-conditionally clearing probe cache is ok with
10144 10142 * separate sd/ssd binaries
10145 10143 * x86 platform can be an issue with both parallel
10146 10144 * and fibre in 1 binary
10147 10145 */
10148 10146 sd_scsi_clear_probe_cache();
10149 10147 return (ENXIO);
10150 10148 }
10151 10149
10152 10150 /*
10153 10151 * The un_layer_count is to prevent another thread in specfs from
10154 10152 * trying to detach the instance, which can happen when we are
10155 10153 * called from a higher-layer driver instead of thru specfs.
10156 10154 * This will not be needed when DDI provides a layered driver
10157 10155 * interface that allows specfs to know that an instance is in
10158 10156 * use by a layered driver & should not be detached.
10159 10157 *
10160 10158 * Note: the semantics for layered driver opens are exactly one
10161 10159 * close for every open.
10162 10160 */
10163 10161 if (otyp == OTYP_LYR) {
10164 10162 un->un_layer_count++;
10165 10163 }
10166 10164
10167 10165 /*
10168 10166 * Keep a count of the current # of opens in progress. This is because
10169 10167 * some layered drivers try to call us as a regular open. This can
10170 10168 * cause problems that we cannot prevent, however by keeping this count
10171 10169 * we can at least keep our open and detach routines from racing against
10172 10170 * each other under such conditions.
10173 10171 */
10174 10172 un->un_opens_in_progress++;
10175 10173 mutex_exit(&sd_detach_mutex);
10176 10174
10177 10175 nodelay = (flag & (FNDELAY | FNONBLOCK));
10178 10176 part = SDPART(dev);
10179 10177 partmask = 1 << part;
10180 10178
10181 10179 /*
10182 10180 * We use a semaphore here in order to serialize
10183 10181 * open and close requests on the device.
10184 10182 */
10185 10183 sema_p(&un->un_semoclose);
10186 10184
10187 10185 mutex_enter(SD_MUTEX(un));
10188 10186
10189 10187 /*
10190 10188 * All device accesses go thru sdstrategy() where we check
10191 10189 * on suspend status but there could be a scsi_poll command,
10192 10190 * which bypasses sdstrategy(), so we need to check pm
10193 10191 * status.
10194 10192 */
10195 10193
10196 10194 if (!nodelay) {
10197 10195 while ((un->un_state == SD_STATE_SUSPENDED) ||
10198 10196 (un->un_state == SD_STATE_PM_CHANGING)) {
10199 10197 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10200 10198 }
10201 10199
10202 10200 mutex_exit(SD_MUTEX(un));
10203 10201 if (sd_pm_entry(un) != DDI_SUCCESS) {
10204 10202 rval = EIO;
10205 10203 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10206 10204 "sdopen: sd_pm_entry failed\n");
10207 10205 goto open_failed_with_pm;
10208 10206 }
10209 10207 mutex_enter(SD_MUTEX(un));
10210 10208 }
10211 10209
10212 10210 /* check for previous exclusive open */
10213 10211 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10214 10212 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10215 10213 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10216 10214 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10217 10215
10218 10216 if (un->un_exclopen & (partmask)) {
10219 10217 goto excl_open_fail;
10220 10218 }
10221 10219
10222 10220 if (flag & FEXCL) {
10223 10221 int i;
10224 10222 if (un->un_ocmap.lyropen[part]) {
10225 10223 goto excl_open_fail;
10226 10224 }
10227 10225 for (i = 0; i < (OTYPCNT - 1); i++) {
10228 10226 if (un->un_ocmap.regopen[i] & (partmask)) {
10229 10227 goto excl_open_fail;
10230 10228 }
10231 10229 }
10232 10230 }
10233 10231
10234 10232 /*
10235 10233 * Check the write permission if this is a removable media device,
10236 10234 * NDELAY has not been set, and writable permission is requested.
10237 10235 *
10238 10236 * Note: If NDELAY was set and this is write-protected media the WRITE
10239 10237 * attempt will fail with EIO as part of the I/O processing. This is a
10240 10238 * more permissive implementation that allows the open to succeed and
10241 10239 * WRITE attempts to fail when appropriate.
10242 10240 */
10243 10241 if (un->un_f_chk_wp_open) {
10244 10242 if ((flag & FWRITE) && (!nodelay)) {
10245 10243 mutex_exit(SD_MUTEX(un));
10246 10244 /*
10247 10245 * Defer the check for write permission on writable
10248 10246 * DVD drive till sdstrategy and will not fail open even
10249 10247 * if FWRITE is set as the device can be writable
10250 10248 * depending upon the media and the media can change
10251 10249 * after the call to open().
10252 10250 */
10253 10251 if (un->un_f_dvdram_writable_device == FALSE) {
10254 10252 if (ISCD(un) || sr_check_wp(dev)) {
10255 10253 rval = EROFS;
10256 10254 mutex_enter(SD_MUTEX(un));
10257 10255 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10258 10256 "write to cd or write protected media\n");
10259 10257 goto open_fail;
10260 10258 }
10261 10259 }
10262 10260 mutex_enter(SD_MUTEX(un));
10263 10261 }
10264 10262 }
10265 10263
10266 10264 /*
10267 10265 * If opening in NDELAY/NONBLOCK mode, just return.
10268 10266 * Check if disk is ready and has a valid geometry later.
10269 10267 */
10270 10268 if (!nodelay) {
10271 10269 sd_ssc_t *ssc;
10272 10270
10273 10271 mutex_exit(SD_MUTEX(un));
10274 10272 ssc = sd_ssc_init(un);
10275 10273 rval = sd_ready_and_valid(ssc, part);
10276 10274 sd_ssc_fini(ssc);
10277 10275 mutex_enter(SD_MUTEX(un));
10278 10276 /*
10279 10277 * Fail if device is not ready or if the number of disk
10280 10278 * blocks is zero or negative for non CD devices.
10281 10279 */
10282 10280
10283 10281 nblks = 0;
10284 10282
10285 10283 if (rval == SD_READY_VALID && (!ISCD(un))) {
10286 10284 /* if cmlb_partinfo fails, nblks remains 0 */
10287 10285 mutex_exit(SD_MUTEX(un));
10288 10286 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10289 10287 NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
10290 10288 mutex_enter(SD_MUTEX(un));
10291 10289 }
10292 10290
10293 10291 if ((rval != SD_READY_VALID) ||
10294 10292 (!ISCD(un) && nblks <= 0)) {
10295 10293 rval = un->un_f_has_removable_media ? ENXIO : EIO;
10296 10294 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10297 10295 "device not ready or invalid disk block value\n");
10298 10296 goto open_fail;
10299 10297 }
10300 10298 #if defined(__i386) || defined(__amd64)
10301 10299 } else {
10302 10300 uchar_t *cp;
10303 10301 /*
10304 10302 * x86 requires special nodelay handling, so that p0 is
10305 10303 * always defined and accessible.
10306 10304 * Invalidate geometry only if device is not already open.
10307 10305 */
10308 10306 cp = &un->un_ocmap.chkd[0];
10309 10307 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10310 10308 if (*cp != (uchar_t)0) {
10311 10309 break;
10312 10310 }
10313 10311 cp++;
10314 10312 }
10315 10313 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10316 10314 mutex_exit(SD_MUTEX(un));
10317 10315 cmlb_invalidate(un->un_cmlbhandle,
10318 10316 (void *)SD_PATH_DIRECT);
10319 10317 mutex_enter(SD_MUTEX(un));
10320 10318 }
10321 10319
10322 10320 #endif
10323 10321 }
10324 10322
10325 10323 if (otyp == OTYP_LYR) {
10326 10324 un->un_ocmap.lyropen[part]++;
10327 10325 } else {
10328 10326 un->un_ocmap.regopen[otyp] |= partmask;
10329 10327 }
10330 10328
10331 10329 /* Set up open and exclusive open flags */
10332 10330 if (flag & FEXCL) {
10333 10331 un->un_exclopen |= (partmask);
10334 10332 }
10335 10333
10336 10334 /*
10337 10335 * If the lun is EFI labeled and lun capacity is greater than the
10338 10336 * capacity contained in the label, log a sys-event to notify the
10339 10337 * interested module.
10340 10338 * To avoid an infinite loop of logging sys-event, we only log the
10341 10339 * event when the lun is not opened in NDELAY mode. The event handler
10342 10340 * should open the lun in NDELAY mode.
10343 10341 */
10344 10342 if (!nodelay) {
10345 10343 mutex_exit(SD_MUTEX(un));
10346 10344 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
10347 10345 (void*)SD_PATH_DIRECT) == 0) {
10348 10346 mutex_enter(SD_MUTEX(un));
10349 10347 if (un->un_f_blockcount_is_valid &&
10350 10348 un->un_blockcount > label_cap &&
10351 10349 un->un_f_expnevent == B_FALSE) {
10352 10350 un->un_f_expnevent = B_TRUE;
10353 10351 mutex_exit(SD_MUTEX(un));
10354 10352 sd_log_lun_expansion_event(un,
10355 10353 (nodelay ? KM_NOSLEEP : KM_SLEEP));
10356 10354 mutex_enter(SD_MUTEX(un));
10357 10355 }
10358 10356 } else {
10359 10357 mutex_enter(SD_MUTEX(un));
10360 10358 }
10361 10359 }
10362 10360
10363 10361 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10364 10362 "open of part %d type %d\n", part, otyp);
10365 10363
10366 10364 mutex_exit(SD_MUTEX(un));
10367 10365 if (!nodelay) {
10368 10366 sd_pm_exit(un);
10369 10367 }
10370 10368
10371 10369 sema_v(&un->un_semoclose);
10372 10370
10373 10371 mutex_enter(&sd_detach_mutex);
10374 10372 un->un_opens_in_progress--;
10375 10373 mutex_exit(&sd_detach_mutex);
10376 10374
10377 10375 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
10378 10376 return (DDI_SUCCESS);
10379 10377
10380 10378 excl_open_fail:
10381 10379 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
10382 10380 rval = EBUSY;
10383 10381
10384 10382 open_fail:
10385 10383 mutex_exit(SD_MUTEX(un));
10386 10384
10387 10385 /*
10388 10386 * On a failed open we must exit the pm management.
10389 10387 */
10390 10388 if (!nodelay) {
10391 10389 sd_pm_exit(un);
10392 10390 }
10393 10391 open_failed_with_pm:
10394 10392 sema_v(&un->un_semoclose);
10395 10393
10396 10394 mutex_enter(&sd_detach_mutex);
10397 10395 un->un_opens_in_progress--;
10398 10396 if (otyp == OTYP_LYR) {
10399 10397 un->un_layer_count--;
10400 10398 }
10401 10399 mutex_exit(&sd_detach_mutex);
10402 10400
10403 10401 return (rval);
10404 10402 }
10405 10403
10406 10404
10407 10405 /*
10408 10406 * Function: sdclose
10409 10407 *
10410 10408 * Description: Driver's close(9e) entry point function.
10411 10409 *
10412 10410 * Arguments: dev - device number
10413 10411 * flag - file status flag, informational only
10414 10412 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10415 10413 * cred_p - user credential pointer
10416 10414 *
10417 10415 * Return Code: ENXIO
10418 10416 *
10419 10417 * Context: Kernel thread context
10420 10418 */
10421 10419 /* ARGSUSED */
10422 10420 static int
10423 10421 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10424 10422 {
10425 10423 struct sd_lun *un;
10426 10424 uchar_t *cp;
10427 10425 int part;
10428 10426 int nodelay;
10429 10427 int rval = 0;
10430 10428
10431 10429 /* Validate the open type */
10432 10430 if (otyp >= OTYPCNT) {
10433 10431 return (ENXIO);
10434 10432 }
10435 10433
10436 10434 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10437 10435 return (ENXIO);
10438 10436 }
10439 10437
10440 10438 part = SDPART(dev);
10441 10439 nodelay = flag & (FNDELAY | FNONBLOCK);
10442 10440
10443 10441 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10444 10442 "sdclose: close of part %d type %d\n", part, otyp);
10445 10443
10446 10444 /*
10447 10445 * We use a semaphore here in order to serialize
10448 10446 * open and close requests on the device.
10449 10447 */
10450 10448 sema_p(&un->un_semoclose);
10451 10449
10452 10450 mutex_enter(SD_MUTEX(un));
10453 10451
10454 10452 /* Don't proceed if power is being changed. */
10455 10453 while (un->un_state == SD_STATE_PM_CHANGING) {
10456 10454 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10457 10455 }
10458 10456
10459 10457 if (un->un_exclopen & (1 << part)) {
10460 10458 un->un_exclopen &= ~(1 << part);
10461 10459 }
10462 10460
10463 10461 /* Update the open partition map */
10464 10462 if (otyp == OTYP_LYR) {
10465 10463 un->un_ocmap.lyropen[part] -= 1;
10466 10464 } else {
10467 10465 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10468 10466 }
10469 10467
10470 10468 cp = &un->un_ocmap.chkd[0];
10471 10469 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10472 10470 if (*cp != NULL) {
10473 10471 break;
10474 10472 }
10475 10473 cp++;
10476 10474 }
10477 10475
10478 10476 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10479 10477 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10480 10478
10481 10479 /*
10482 10480 * We avoid persistance upon the last close, and set
10483 10481 * the throttle back to the maximum.
10484 10482 */
10485 10483 un->un_throttle = un->un_saved_throttle;
10486 10484
10487 10485 if (un->un_state == SD_STATE_OFFLINE) {
10488 10486 if (un->un_f_is_fibre == FALSE) {
10489 10487 scsi_log(SD_DEVINFO(un), sd_label,
10490 10488 CE_WARN, "offline\n");
10491 10489 }
10492 10490 mutex_exit(SD_MUTEX(un));
10493 10491 cmlb_invalidate(un->un_cmlbhandle,
10494 10492 (void *)SD_PATH_DIRECT);
10495 10493 mutex_enter(SD_MUTEX(un));
10496 10494
10497 10495 } else {
10498 10496 /*
10499 10497 * Flush any outstanding writes in NVRAM cache.
10500 10498 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10501 10499 * cmd, it may not work for non-Pluto devices.
10502 10500 * SYNCHRONIZE CACHE is not required for removables,
10503 10501 * except DVD-RAM drives.
10504 10502 *
10505 10503 * Also note: because SYNCHRONIZE CACHE is currently
10506 10504 * the only command issued here that requires the
10507 10505 * drive be powered up, only do the power up before
10508 10506 * sending the Sync Cache command. If additional
10509 10507 * commands are added which require a powered up
10510 10508 * drive, the following sequence may have to change.
10511 10509 *
10512 10510 * And finally, note that parallel SCSI on SPARC
10513 10511 * only issues a Sync Cache to DVD-RAM, a newly
10514 10512 * supported device.
10515 10513 */
10516 10514 #if defined(__i386) || defined(__amd64)
10517 10515 if ((un->un_f_sync_cache_supported &&
10518 10516 un->un_f_sync_cache_required) ||
10519 10517 un->un_f_dvdram_writable_device == TRUE) {
10520 10518 #else
10521 10519 if (un->un_f_dvdram_writable_device == TRUE) {
10522 10520 #endif
10523 10521 mutex_exit(SD_MUTEX(un));
10524 10522 if (sd_pm_entry(un) == DDI_SUCCESS) {
10525 10523 rval =
10526 10524 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10527 10525 NULL);
10528 10526 /* ignore error if not supported */
10529 10527 if (rval == ENOTSUP) {
10530 10528 rval = 0;
10531 10529 } else if (rval != 0) {
10532 10530 rval = EIO;
10533 10531 }
10534 10532 sd_pm_exit(un);
10535 10533 } else {
10536 10534 rval = EIO;
10537 10535 }
10538 10536 mutex_enter(SD_MUTEX(un));
10539 10537 }
10540 10538
10541 10539 /*
10542 10540 * For devices which supports DOOR_LOCK, send an ALLOW
10543 10541 * MEDIA REMOVAL command, but don't get upset if it
10544 10542 * fails. We need to raise the power of the drive before
10545 10543 * we can call sd_send_scsi_DOORLOCK()
10546 10544 */
10547 10545 if (un->un_f_doorlock_supported) {
10548 10546 mutex_exit(SD_MUTEX(un));
10549 10547 if (sd_pm_entry(un) == DDI_SUCCESS) {
10550 10548 sd_ssc_t *ssc;
10551 10549
10552 10550 ssc = sd_ssc_init(un);
10553 10551 rval = sd_send_scsi_DOORLOCK(ssc,
10554 10552 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10555 10553 if (rval != 0)
10556 10554 sd_ssc_assessment(ssc,
10557 10555 SD_FMT_IGNORE);
10558 10556 sd_ssc_fini(ssc);
10559 10557
10560 10558 sd_pm_exit(un);
10561 10559 if (ISCD(un) && (rval != 0) &&
10562 10560 (nodelay != 0)) {
10563 10561 rval = ENXIO;
10564 10562 }
10565 10563 } else {
10566 10564 rval = EIO;
10567 10565 }
10568 10566 mutex_enter(SD_MUTEX(un));
10569 10567 }
10570 10568
10571 10569 /*
10572 10570 * If a device has removable media, invalidate all
10573 10571 * parameters related to media, such as geometry,
10574 10572 * blocksize, and blockcount.
10575 10573 */
10576 10574 if (un->un_f_has_removable_media) {
10577 10575 sr_ejected(un);
10578 10576 }
10579 10577
10580 10578 /*
10581 10579 * Destroy the cache (if it exists) which was
10582 10580 * allocated for the write maps since this is
10583 10581 * the last close for this media.
10584 10582 */
10585 10583 if (un->un_wm_cache) {
10586 10584 /*
10587 10585 * Check if there are pending commands.
10588 10586 * and if there are give a warning and
10589 10587 * do not destroy the cache.
10590 10588 */
10591 10589 if (un->un_ncmds_in_driver > 0) {
10592 10590 scsi_log(SD_DEVINFO(un),
10593 10591 sd_label, CE_WARN,
10594 10592 "Unable to clean up memory "
10595 10593 "because of pending I/O\n");
10596 10594 } else {
10597 10595 kmem_cache_destroy(
10598 10596 un->un_wm_cache);
10599 10597 un->un_wm_cache = NULL;
10600 10598 }
10601 10599 }
10602 10600 }
10603 10601 }
10604 10602
10605 10603 mutex_exit(SD_MUTEX(un));
10606 10604 sema_v(&un->un_semoclose);
10607 10605
10608 10606 if (otyp == OTYP_LYR) {
10609 10607 mutex_enter(&sd_detach_mutex);
10610 10608 /*
10611 10609 * The detach routine may run when the layer count
10612 10610 * drops to zero.
10613 10611 */
10614 10612 un->un_layer_count--;
10615 10613 mutex_exit(&sd_detach_mutex);
10616 10614 }
10617 10615
10618 10616 return (rval);
10619 10617 }
10620 10618
10621 10619
10622 10620 /*
10623 10621 * Function: sd_ready_and_valid
10624 10622 *
10625 10623 * Description: Test if device is ready and has a valid geometry.
10626 10624 *
 10627  10625  *   Arguments: ssc  - sd_ssc_t which carries the driver soft state (un)
 10628  10626  *		part - partition number to be validated
10629 10627 *
10630 10628 * Return Code: SD_READY_VALID ready and valid label
10631 10629 * SD_NOT_READY_VALID not ready, no label
10632 10630 * SD_RESERVED_BY_OTHERS reservation conflict
10633 10631 *
10634 10632 * Context: Never called at interrupt context.
10635 10633 */
10636 10634
static int
sd_ready_and_valid(sd_ssc_t *ssc, int part)
{
	struct sd_errstats	*stp;
	uint64_t		capacity;
	uint_t			lbasize;
	int			rval = SD_READY_VALID;
	char			name_str[48];
	boolean_t		is_valid;
	struct sd_lun		*un;
	int			status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	/*
	 * If a device has removable media, we must check if media is
	 * ready when checking if this device is ready and valid.
	 */
	if (un->un_f_has_removable_media) {
		/* TUR issues I/O, so drop SD_MUTEX around the command. */
		mutex_exit(SD_MUTEX(un));
		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

		if (status != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));

			/* Ignore all failed status for removable media */
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

			goto done;
		}

		is_valid = SD_IS_VALID_LABEL(un);
		mutex_enter(SD_MUTEX(un));
		if (!is_valid ||
		    (un->un_f_blockcount_is_valid == FALSE) ||
		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {

			/*
			 * Capacity has to be read every open: removable
			 * media may have been swapped since the last open.
			 */
			mutex_exit(SD_MUTEX(un));
			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
			    &lbasize, SD_PATH_DIRECT);

			if (status != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);

				cmlb_invalidate(un->un_cmlbhandle,
				    (void *)SD_PATH_DIRECT);
				mutex_enter(SD_MUTEX(un));
				rval = SD_NOT_READY_VALID;

				goto done;
			} else {
				mutex_enter(SD_MUTEX(un));
				sd_update_block_info(un, lbasize, capacity);
			}
		}

		/*
		 * Check if the media in the device is writable or not.
		 */
		if (!is_valid && ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}

	} else {
		/*
		 * Do a test unit ready to clear any unit attention from non-cd
		 * devices.  A failure here is deliberately ignored (the
		 * command exists only to clear pending unit attentions).
		 */
		mutex_exit(SD_MUTEX(un));

		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		if (status != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		mutex_enter(SD_MUTEX(un));
	}


	/*
	 * If this is a non 512 block device, allocate space for
	 * the wmap cache. This is being done here since every time
	 * a media is changed this routine will be called and the
	 * block size is a function of media rather than device.
	 */
	if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
	    un->un_f_non_devbsize_supported) &&
	    un->un_tgt_blocksize != DEV_BSIZE) ||
	    un->un_f_enable_rmw) {
		if (!(un->un_wm_cache)) {
			(void) snprintf(name_str, sizeof (name_str),
			    "%s%d_cache",
			    ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			un->un_wm_cache = kmem_cache_create(
			    name_str, sizeof (struct sd_w_map),
			    8, sd_wm_cache_constructor,
			    sd_wm_cache_destructor, NULL,
			    (void *)un, NULL, 0);
			if (!(un->un_wm_cache)) {
				/*
				 * NOTE(review): ENOMEM is outside the
				 * documented SD_* return set; callers treat
				 * any value != SD_READY_VALID as failure,
				 * so this still reads as "not valid".
				 */
				rval = ENOMEM;
				goto done;
			}
		}
	}

	if (un->un_state == SD_STATE_NORMAL) {
		/*
		 * If the target is not yet ready here (defined by a TUR
		 * failure), invalidate the geometry and print an 'offline'
		 * message. This is a legacy message, as the state of the
		 * target is not actually changed to SD_STATE_OFFLINE.
		 *
		 * If the TUR fails for EACCES (Reservation Conflict),
		 * SD_RESERVED_BY_OTHERS will be returned to indicate
		 * reservation conflict. If the TUR fails for other
		 * reasons, SD_NOT_READY_VALID will be returned.
		 */
		int err;

		mutex_exit(SD_MUTEX(un));
		err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		mutex_enter(SD_MUTEX(un));

		if (err != 0) {
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
			if (err == EACCES) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "reservation conflict\n");
				rval = SD_RESERVED_BY_OTHERS;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "drive offline\n");
				rval = SD_NOT_READY_VALID;
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			}
			goto done;
		}
	}

	if (un->un_f_format_in_progress == FALSE) {
		mutex_exit(SD_MUTEX(un));

		(void) cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT);
		if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
		    NULL, (void *) SD_PATH_DIRECT) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));

			goto done;
		}
		if (un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_ready_and_valid: un:0x%p pstats created and "
			    "set\n", un);
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * If this device supports DOOR_LOCK command, try and send
	 * this command to PREVENT MEDIA REMOVAL, but don't get upset
	 * if it fails. For a CD, however, it is an error
	 */
	if (un->un_f_doorlock_supported) {
		mutex_exit(SD_MUTEX(un));
		status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);

		if ((status != 0) && ISCD(un)) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));

			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

			goto done;
		} else if (status != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		mutex_enter(SD_MUTEX(un));
	}

	/* The state has changed, inform the media watch routines */
	un->un_mediastate = DKIO_INSERTED;
	cv_broadcast(&un->un_state_cv);
	rval = SD_READY_VALID;

done:

	/*
	 * Initialize the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_blockcount > 0).
	 */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}

	/* All exit paths funnel through "done" holding SD_MUTEX. */
	mutex_exit(SD_MUTEX(un));
	return (rval);
}
10855 10853
10856 10854
10857 10855 /*
10858 10856 * Function: sdmin
10859 10857 *
10860 10858 * Description: Routine to limit the size of a data transfer. Used in
10861 10859 * conjunction with physio(9F).
10862 10860 *
10863 10861 * Arguments: bp - pointer to the indicated buf(9S) struct.
10864 10862 *
10865 10863 * Context: Kernel thread context.
10866 10864 */
10867 10865
10868 10866 static void
10869 10867 sdmin(struct buf *bp)
10870 10868 {
10871 10869 struct sd_lun *un;
10872 10870 int instance;
10873 10871
10874 10872 instance = SDUNIT(bp->b_edev);
10875 10873
10876 10874 un = ddi_get_soft_state(sd_state, instance);
10877 10875 ASSERT(un != NULL);
10878 10876
10879 10877 /*
10880 10878 * We depend on buf breakup to restrict
10881 10879 * IO size if it is enabled.
10882 10880 */
10883 10881 if (un->un_buf_breakup_supported) {
10884 10882 return;
10885 10883 }
10886 10884
10887 10885 if (bp->b_bcount > un->un_max_xfer_size) {
10888 10886 bp->b_bcount = un->un_max_xfer_size;
10889 10887 }
10890 10888 }
10891 10889
10892 10890
10893 10891 /*
10894 10892 * Function: sdread
10895 10893 *
10896 10894 * Description: Driver's read(9e) entry point function.
10897 10895 *
10898 10896 * Arguments: dev - device number
10899 10897 * uio - structure pointer describing where data is to be stored
10900 10898 * in user's space
10901 10899 * cred_p - user credential pointer
10902 10900 *
10903 10901 * Return Code: ENXIO
10904 10902 * EIO
10905 10903 * EINVAL
10906 10904 * value returned by physio
10907 10905 *
10908 10906 * Context: Kernel thread context.
10909 10907 */
10910 10908 /* ARGSUSED */
10911 10909 static int
10912 10910 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10913 10911 {
10914 10912 struct sd_lun *un = NULL;
10915 10913 int secmask;
10916 10914 int err = 0;
10917 10915 sd_ssc_t *ssc;
10918 10916
10919 10917 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10920 10918 return (ENXIO);
10921 10919 }
10922 10920
10923 10921 ASSERT(!mutex_owned(SD_MUTEX(un)));
10924 10922
10925 10923
10926 10924 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10927 10925 mutex_enter(SD_MUTEX(un));
10928 10926 /*
10929 10927 * Because the call to sd_ready_and_valid will issue I/O we
10930 10928 * must wait here if either the device is suspended or
10931 10929 * if it's power level is changing.
10932 10930 */
10933 10931 while ((un->un_state == SD_STATE_SUSPENDED) ||
10934 10932 (un->un_state == SD_STATE_PM_CHANGING)) {
10935 10933 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10936 10934 }
10937 10935 un->un_ncmds_in_driver++;
10938 10936 mutex_exit(SD_MUTEX(un));
10939 10937
10940 10938 /* Initialize sd_ssc_t for internal uscsi commands */
10941 10939 ssc = sd_ssc_init(un);
10942 10940 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10943 10941 err = EIO;
10944 10942 } else {
10945 10943 err = 0;
10946 10944 }
10947 10945 sd_ssc_fini(ssc);
10948 10946
10949 10947 mutex_enter(SD_MUTEX(un));
10950 10948 un->un_ncmds_in_driver--;
10951 10949 ASSERT(un->un_ncmds_in_driver >= 0);
10952 10950 mutex_exit(SD_MUTEX(un));
10953 10951 if (err != 0)
10954 10952 return (err);
10955 10953 }
10956 10954
10957 10955 /*
10958 10956 * Read requests are restricted to multiples of the system block size.
10959 10957 */
10960 10958 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
10961 10959 !un->un_f_enable_rmw)
10962 10960 secmask = un->un_tgt_blocksize - 1;
10963 10961 else
10964 10962 secmask = DEV_BSIZE - 1;
10965 10963
10966 10964 if (uio->uio_loffset & ((offset_t)(secmask))) {
10967 10965 SD_ERROR(SD_LOG_READ_WRITE, un,
10968 10966 "sdread: file offset not modulo %d\n",
10969 10967 secmask + 1);
10970 10968 err = EINVAL;
10971 10969 } else if (uio->uio_iov->iov_len & (secmask)) {
10972 10970 SD_ERROR(SD_LOG_READ_WRITE, un,
10973 10971 "sdread: transfer length not modulo %d\n",
10974 10972 secmask + 1);
10975 10973 err = EINVAL;
10976 10974 } else {
10977 10975 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10978 10976 }
10979 10977
10980 10978 return (err);
10981 10979 }
10982 10980
10983 10981
10984 10982 /*
10985 10983 * Function: sdwrite
10986 10984 *
10987 10985 * Description: Driver's write(9e) entry point function.
10988 10986 *
10989 10987 * Arguments: dev - device number
10990 10988 * uio - structure pointer describing where data is stored in
10991 10989 * user's space
10992 10990 * cred_p - user credential pointer
10993 10991 *
10994 10992 * Return Code: ENXIO
10995 10993 * EIO
10996 10994 * EINVAL
10997 10995 * value returned by physio
10998 10996 *
10999 10997 * Context: Kernel thread context.
11000 10998 */
11001 10999 /* ARGSUSED */
11002 11000 static int
11003 11001 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
11004 11002 {
11005 11003 struct sd_lun *un = NULL;
11006 11004 int secmask;
11007 11005 int err = 0;
11008 11006 sd_ssc_t *ssc;
11009 11007
11010 11008 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11011 11009 return (ENXIO);
11012 11010 }
11013 11011
11014 11012 ASSERT(!mutex_owned(SD_MUTEX(un)));
11015 11013
11016 11014 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11017 11015 mutex_enter(SD_MUTEX(un));
11018 11016 /*
11019 11017 * Because the call to sd_ready_and_valid will issue I/O we
11020 11018 * must wait here if either the device is suspended or
11021 11019 * if it's power level is changing.
11022 11020 */
11023 11021 while ((un->un_state == SD_STATE_SUSPENDED) ||
11024 11022 (un->un_state == SD_STATE_PM_CHANGING)) {
11025 11023 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11026 11024 }
11027 11025 un->un_ncmds_in_driver++;
11028 11026 mutex_exit(SD_MUTEX(un));
11029 11027
11030 11028 /* Initialize sd_ssc_t for internal uscsi commands */
11031 11029 ssc = sd_ssc_init(un);
11032 11030 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11033 11031 err = EIO;
11034 11032 } else {
11035 11033 err = 0;
11036 11034 }
11037 11035 sd_ssc_fini(ssc);
11038 11036
11039 11037 mutex_enter(SD_MUTEX(un));
11040 11038 un->un_ncmds_in_driver--;
11041 11039 ASSERT(un->un_ncmds_in_driver >= 0);
11042 11040 mutex_exit(SD_MUTEX(un));
11043 11041 if (err != 0)
11044 11042 return (err);
11045 11043 }
11046 11044
11047 11045 /*
11048 11046 * Write requests are restricted to multiples of the system block size.
11049 11047 */
11050 11048 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11051 11049 !un->un_f_enable_rmw)
11052 11050 secmask = un->un_tgt_blocksize - 1;
11053 11051 else
11054 11052 secmask = DEV_BSIZE - 1;
11055 11053
11056 11054 if (uio->uio_loffset & ((offset_t)(secmask))) {
11057 11055 SD_ERROR(SD_LOG_READ_WRITE, un,
11058 11056 "sdwrite: file offset not modulo %d\n",
11059 11057 secmask + 1);
11060 11058 err = EINVAL;
11061 11059 } else if (uio->uio_iov->iov_len & (secmask)) {
11062 11060 SD_ERROR(SD_LOG_READ_WRITE, un,
11063 11061 "sdwrite: transfer length not modulo %d\n",
11064 11062 secmask + 1);
11065 11063 err = EINVAL;
11066 11064 } else {
11067 11065 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
11068 11066 }
11069 11067
11070 11068 return (err);
11071 11069 }
11072 11070
11073 11071
11074 11072 /*
11075 11073 * Function: sdaread
11076 11074 *
11077 11075 * Description: Driver's aread(9e) entry point function.
11078 11076 *
11079 11077 * Arguments: dev - device number
11080 11078 * aio - structure pointer describing where data is to be stored
11081 11079 * cred_p - user credential pointer
11082 11080 *
11083 11081 * Return Code: ENXIO
11084 11082 * EIO
11085 11083 * EINVAL
11086 11084 * value returned by aphysio
11087 11085 *
11088 11086 * Context: Kernel thread context.
11089 11087 */
11090 11088 /* ARGSUSED */
11091 11089 static int
11092 11090 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11093 11091 {
11094 11092 struct sd_lun *un = NULL;
11095 11093 struct uio *uio = aio->aio_uio;
11096 11094 int secmask;
11097 11095 int err = 0;
11098 11096 sd_ssc_t *ssc;
11099 11097
11100 11098 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11101 11099 return (ENXIO);
11102 11100 }
11103 11101
11104 11102 ASSERT(!mutex_owned(SD_MUTEX(un)));
11105 11103
11106 11104 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11107 11105 mutex_enter(SD_MUTEX(un));
11108 11106 /*
11109 11107 * Because the call to sd_ready_and_valid will issue I/O we
11110 11108 * must wait here if either the device is suspended or
11111 11109 * if it's power level is changing.
11112 11110 */
11113 11111 while ((un->un_state == SD_STATE_SUSPENDED) ||
11114 11112 (un->un_state == SD_STATE_PM_CHANGING)) {
11115 11113 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11116 11114 }
11117 11115 un->un_ncmds_in_driver++;
11118 11116 mutex_exit(SD_MUTEX(un));
11119 11117
11120 11118 /* Initialize sd_ssc_t for internal uscsi commands */
11121 11119 ssc = sd_ssc_init(un);
11122 11120 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11123 11121 err = EIO;
11124 11122 } else {
11125 11123 err = 0;
11126 11124 }
11127 11125 sd_ssc_fini(ssc);
11128 11126
11129 11127 mutex_enter(SD_MUTEX(un));
11130 11128 un->un_ncmds_in_driver--;
11131 11129 ASSERT(un->un_ncmds_in_driver >= 0);
11132 11130 mutex_exit(SD_MUTEX(un));
11133 11131 if (err != 0)
11134 11132 return (err);
11135 11133 }
11136 11134
11137 11135 /*
11138 11136 * Read requests are restricted to multiples of the system block size.
11139 11137 */
11140 11138 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11141 11139 !un->un_f_enable_rmw)
11142 11140 secmask = un->un_tgt_blocksize - 1;
11143 11141 else
11144 11142 secmask = DEV_BSIZE - 1;
11145 11143
11146 11144 if (uio->uio_loffset & ((offset_t)(secmask))) {
11147 11145 SD_ERROR(SD_LOG_READ_WRITE, un,
11148 11146 "sdaread: file offset not modulo %d\n",
11149 11147 secmask + 1);
11150 11148 err = EINVAL;
11151 11149 } else if (uio->uio_iov->iov_len & (secmask)) {
11152 11150 SD_ERROR(SD_LOG_READ_WRITE, un,
11153 11151 "sdaread: transfer length not modulo %d\n",
11154 11152 secmask + 1);
11155 11153 err = EINVAL;
11156 11154 } else {
11157 11155 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
11158 11156 }
11159 11157
11160 11158 return (err);
11161 11159 }
11162 11160
11163 11161
11164 11162 /*
11165 11163 * Function: sdawrite
11166 11164 *
11167 11165 * Description: Driver's awrite(9e) entry point function.
11168 11166 *
11169 11167 * Arguments: dev - device number
11170 11168 * aio - structure pointer describing where data is stored
11171 11169 * cred_p - user credential pointer
11172 11170 *
11173 11171 * Return Code: ENXIO
11174 11172 * EIO
11175 11173 * EINVAL
11176 11174 * value returned by aphysio
11177 11175 *
11178 11176 * Context: Kernel thread context.
11179 11177 */
11180 11178 /* ARGSUSED */
11181 11179 static int
11182 11180 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11183 11181 {
11184 11182 struct sd_lun *un = NULL;
11185 11183 struct uio *uio = aio->aio_uio;
11186 11184 int secmask;
11187 11185 int err = 0;
11188 11186 sd_ssc_t *ssc;
11189 11187
11190 11188 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11191 11189 return (ENXIO);
11192 11190 }
11193 11191
11194 11192 ASSERT(!mutex_owned(SD_MUTEX(un)));
11195 11193
11196 11194 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11197 11195 mutex_enter(SD_MUTEX(un));
11198 11196 /*
11199 11197 * Because the call to sd_ready_and_valid will issue I/O we
11200 11198 * must wait here if either the device is suspended or
11201 11199 * if it's power level is changing.
11202 11200 */
11203 11201 while ((un->un_state == SD_STATE_SUSPENDED) ||
11204 11202 (un->un_state == SD_STATE_PM_CHANGING)) {
11205 11203 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11206 11204 }
11207 11205 un->un_ncmds_in_driver++;
11208 11206 mutex_exit(SD_MUTEX(un));
11209 11207
11210 11208 /* Initialize sd_ssc_t for internal uscsi commands */
11211 11209 ssc = sd_ssc_init(un);
11212 11210 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11213 11211 err = EIO;
11214 11212 } else {
11215 11213 err = 0;
11216 11214 }
11217 11215 sd_ssc_fini(ssc);
11218 11216
11219 11217 mutex_enter(SD_MUTEX(un));
11220 11218 un->un_ncmds_in_driver--;
11221 11219 ASSERT(un->un_ncmds_in_driver >= 0);
11222 11220 mutex_exit(SD_MUTEX(un));
11223 11221 if (err != 0)
11224 11222 return (err);
11225 11223 }
11226 11224
11227 11225 /*
11228 11226 * Write requests are restricted to multiples of the system block size.
11229 11227 */
11230 11228 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11231 11229 !un->un_f_enable_rmw)
11232 11230 secmask = un->un_tgt_blocksize - 1;
11233 11231 else
11234 11232 secmask = DEV_BSIZE - 1;
11235 11233
11236 11234 if (uio->uio_loffset & ((offset_t)(secmask))) {
11237 11235 SD_ERROR(SD_LOG_READ_WRITE, un,
11238 11236 "sdawrite: file offset not modulo %d\n",
11239 11237 secmask + 1);
11240 11238 err = EINVAL;
11241 11239 } else if (uio->uio_iov->iov_len & (secmask)) {
11242 11240 SD_ERROR(SD_LOG_READ_WRITE, un,
11243 11241 "sdawrite: transfer length not modulo %d\n",
11244 11242 secmask + 1);
11245 11243 err = EINVAL;
11246 11244 } else {
11247 11245 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
11248 11246 }
11249 11247
11250 11248 return (err);
11251 11249 }
11252 11250
11253 11251
11254 11252
11255 11253
11256 11254
11257 11255 /*
11258 11256 * Driver IO processing follows the following sequence:
11259 11257 *
11260 11258 * sdioctl(9E) sdstrategy(9E) biodone(9F)
11261 11259 * | | ^
11262 11260 * v v |
11263 11261 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+
11264 11262 * | | | |
11265 11263 * v | | |
11266 11264 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone()
11267 11265 * | | ^ ^
11268 11266 * v v | |
11269 11267 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | |
11270 11268 * | | | |
11271 11269 * +---+ | +------------+ +-------+
11272 11270 * | | | |
11273 11271 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11274 11272 * | v | |
11275 11273 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() |
11276 11274 * | | ^ |
11277 11275 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11278 11276 * | v | |
11279 11277 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() |
11280 11278 * | | ^ |
11281 11279 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11282 11280 * | v | |
11283 11281 * | sd_checksum_iostart() sd_checksum_iodone() |
11284 11282 * | | ^ |
11285 11283 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+
11286 11284 * | v | |
11287 11285 * | sd_pm_iostart() sd_pm_iodone() |
11288 11286 * | | ^ |
11289 11287 * | | | |
11290 11288 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+
11291 11289 * | ^
11292 11290 * v |
11293 11291 * sd_core_iostart() |
11294 11292 * | |
11295 11293 * | +------>(*destroypkt)()
11296 11294 * +-> sd_start_cmds() <-+ | |
11297 11295 * | | | v
11298 11296 * | | | scsi_destroy_pkt(9F)
11299 11297 * | | |
11300 11298 * +->(*initpkt)() +- sdintr()
11301 11299 * | | | |
11302 11300 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx()
11303 11301 * | +-> scsi_setup_cdb(9F) |
11304 11302 * | |
11305 11303 * +--> scsi_transport(9F) |
11306 11304 * | |
11307 11305 * +----> SCSA ---->+
11308 11306 *
11309 11307 *
11310 11308 * This code is based upon the following presumptions:
11311 11309 *
11312 11310 * - iostart and iodone functions operate on buf(9S) structures. These
11313 11311 * functions perform the necessary operations on the buf(9S) and pass
11314 11312 * them along to the next function in the chain by using the macros
11315 11313 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
11316 11314 * (for iodone side functions).
11317 11315 *
11318 11316 * - The iostart side functions may sleep. The iodone side functions
11319 11317 * are called under interrupt context and may NOT sleep. Therefore
11320 11318 * iodone side functions also may not call iostart side functions.
11321 11319 * (NOTE: iostart side functions should NOT sleep for memory, as
11322 11320 * this could result in deadlock.)
11323 11321 *
11324 11322 * - An iostart side function may call its corresponding iodone side
11325 11323 * function directly (if necessary).
11326 11324 *
11327 11325 * - In the event of an error, an iostart side function can return a buf(9S)
11328 11326 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
11329 11327 * b_error in the usual way of course).
11330 11328 *
11331 11329 * - The taskq mechanism may be used by the iodone side functions to dispatch
11332 11330 * requests to the iostart side functions. The iostart side functions in
11333 11331 * this case would be called under the context of a taskq thread, so it's
11334 11332 * OK for them to block/sleep/spin in this case.
11335 11333 *
11336 11334 * - iostart side functions may allocate "shadow" buf(9S) structs and
11337 11335 * pass them along to the next function in the chain. The corresponding
11338 11336 * iodone side functions must coalesce the "shadow" bufs and return
11339 11337 * the "original" buf to the next higher layer.
11340 11338 *
11341 11339 * - The b_private field of the buf(9S) struct holds a pointer to
11342 11340 * an sd_xbuf struct, which contains information needed to
11343 11341 * construct the scsi_pkt for the command.
11344 11342 *
11345 11343 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11346 11344 * layer must acquire & release the SD_MUTEX(un) as needed.
11347 11345 */
11348 11346
11349 11347
/*
 * Create taskq for all targets in the system. This is created at
 * _init(9E) and destroyed at _fini(9E).
 *
 * Note: here we set the minalloc to a reasonably high number to ensure that
 * we will have an adequate supply of task entries available at interrupt time.
 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
 * sd_create_taskq(). Since we do not want to sleep for allocations at
 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
 * requests any one instant in time.
 */
#define	SD_TASKQ_NUMTHREADS	8	/* worker threads for sd_tq */
#define	SD_TASKQ_MINALLOC	256	/* prepopulated task entries */
#define	SD_TASKQ_MAXALLOC	256	/* == minalloc: fail, don't sleep */

/* Driver-wide taskq, shared by all sd instances (set up once in _init). */
static taskq_t	*sd_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))

/* Tunable copies of the alloc limits, passed to taskq_create(). */
static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;

/*
 * The following task queue is being created for the write part of
 * read-modify-write of non-512 block size devices.
 * Limit the number of threads to 1 for now. This number has been chosen
 * considering the fact that it applies only to dvd ram drives/MO drives
 * currently. Performance for which is not main criteria at this stage.
 * Note: It needs to be explored if we can use a single taskq in future
 */
#define	SD_WMR_TASKQ_NUMTHREADS	1
static taskq_t	*sd_wmr_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11383 11381
11384 11382 /*
11385 11383 * Function: sd_taskq_create
11386 11384 *
11387 11385 * Description: Create taskq thread(s) and preallocate task entries
11388 11386 *
11389 11387 * Return Code: Returns a pointer to the allocated taskq_t.
11390 11388 *
11391 11389 * Context: Can sleep. Requires blockable context.
11392 11390 *
11393 11391 * Notes: - The taskq() facility currently is NOT part of the DDI.
11394 11392 * (definitely NOT recommeded for 3rd-party drivers!) :-)
11395 11393 * - taskq_create() will block for memory, also it will panic
11396 11394 * if it cannot create the requested number of threads.
11397 11395 * - Currently taskq_create() creates threads that cannot be
11398 11396 * swapped.
11399 11397 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
11400 11398 * supply of taskq entries at interrupt time (ie, so that we
11401 11399 * do not have to sleep for memory)
11402 11400 */
11403 11401
11404 11402 static void
11405 11403 sd_taskq_create(void)
11406 11404 {
11407 11405 char taskq_name[TASKQ_NAMELEN];
11408 11406
11409 11407 ASSERT(sd_tq == NULL);
11410 11408 ASSERT(sd_wmr_tq == NULL);
11411 11409
11412 11410 (void) snprintf(taskq_name, sizeof (taskq_name),
11413 11411 "%s_drv_taskq", sd_label);
11414 11412 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11415 11413 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11416 11414 TASKQ_PREPOPULATE));
11417 11415
11418 11416 (void) snprintf(taskq_name, sizeof (taskq_name),
11419 11417 "%s_rmw_taskq", sd_label);
11420 11418 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11421 11419 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11422 11420 TASKQ_PREPOPULATE));
11423 11421 }
11424 11422
11425 11423
11426 11424 /*
11427 11425 * Function: sd_taskq_delete
11428 11426 *
11429 11427 * Description: Complementary cleanup routine for sd_taskq_create().
11430 11428 *
11431 11429 * Context: Kernel thread context.
11432 11430 */
11433 11431
11434 11432 static void
11435 11433 sd_taskq_delete(void)
11436 11434 {
11437 11435 ASSERT(sd_tq != NULL);
11438 11436 ASSERT(sd_wmr_tq != NULL);
11439 11437 taskq_destroy(sd_tq);
11440 11438 taskq_destroy(sd_wmr_tq);
11441 11439 sd_tq = NULL;
11442 11440 sd_wmr_tq = NULL;
11443 11441 }
11444 11442
11445 11443
/*
 * Function: sdstrategy
 *
 * Description: Driver's strategy (9E) entry point function.
 *
 * Arguments: bp - pointer to buf(9S)
 *
 * Return Code: Always returns zero
 *
 * Context: Kernel thread context.
 */

static int
sdstrategy(struct buf *bp)
{
	struct sd_lun *un;

	/* No soft state for this instance: fail the request with EIO. */
	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	/* As was done in the past, fail new cmds. if state is dumping. */
	if (un->un_state == SD_STATE_DUMPING) {
		bioerror(bp, ENXIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Commands may sneak in while we released the mutex in
	 * DDI_SUSPEND, we should block new commands. However, old
	 * commands that are still in the driver at this point should
	 * still be allowed to drain.
	 */
	mutex_enter(SD_MUTEX(un));
	/*
	 * Must wait here if either the device is suspended or
	 * if it's power level is changing.
	 */
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	/* Count this command as in the driver until biodone. */
	un->un_ncmds_in_driver++;

	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin called interrupt context under
	 * the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		/* Drop the mutex around bp_mapin(); it may sleep. */
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/* A write makes a later SYNCHRONIZE CACHE necessary. */
	if (bp->b_flags & B_WRITE)
		un->un_f_sync_cache_required = TRUE;

	mutex_exit(SD_MUTEX(un));

	/*
	 * This will (eventually) allocate the sd_xbuf area and
	 * call sd_xbuf_strategy(). We just want to return the
	 * result of ddi_xbuf_qstrategy so that we have an opt-
	 * imized tail call which saves us a stack frame.
	 */
	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
}
11525 11523
11526 11524
11527 11525 /*
11528 11526 * Function: sd_xbuf_strategy
11529 11527 *
11530 11528 * Description: Function for initiating IO operations via the
11531 11529 * ddi_xbuf_qstrategy() mechanism.
11532 11530 *
11533 11531 * Context: Kernel thread context.
11534 11532 */
11535 11533
11536 11534 static void
11537 11535 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
11538 11536 {
11539 11537 struct sd_lun *un = arg;
11540 11538
11541 11539 ASSERT(bp != NULL);
11542 11540 ASSERT(xp != NULL);
11543 11541 ASSERT(un != NULL);
11544 11542 ASSERT(!mutex_owned(SD_MUTEX(un)));
11545 11543
11546 11544 /*
11547 11545 * Initialize the fields in the xbuf and save a pointer to the
11548 11546 * xbuf in bp->b_private.
11549 11547 */
11550 11548 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
11551 11549
11552 11550 /* Send the buf down the iostart chain */
11553 11551 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
11554 11552 }
11555 11553
11556 11554
/*
 * Function: sd_xbuf_init
 *
 * Description: Prepare the given sd_xbuf struct for use: link it to the
 *		buf, select the iostart/iodone chain indexes for the given
 *		chain type, and zero the per-command bookkeeping fields.
 *
 * Arguments: un - ptr to softstate
 *		bp - ptr to associated buf(9S)
 *		xp - ptr to associated sd_xbuf
 *		chain_type - IO chain type to use:
 *			SD_CHAIN_NULL
 *			SD_CHAIN_BUFIO
 *			SD_CHAIN_USCSI
 *			SD_CHAIN_DIRECT
 *			SD_CHAIN_DIRECT_PRIORITY
 *		pktinfop - ptr to private data struct for scsi_pkt(9S)
 *			initialization; may be NULL if none.
 *
 * Context: Kernel thread context
 */

static void
sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop)
{
	int index;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
	    bp, chain_type);

	xp->xb_un = un;
	xp->xb_pktp = NULL;
	xp->xb_pktinfo = pktinfop;
	/* Preserve the caller's b_private; it is restored at iodone. */
	xp->xb_private = bp->b_private;
	xp->xb_blkno = (daddr_t)bp->b_blkno;

	/*
	 * Set up the iostart and iodone chain indexes in the xbuf, based
	 * upon the specified chain type to use.
	 */
	switch (chain_type) {
	case SD_CHAIN_NULL:
		/*
		 * Fall thru to just use the values for the buf type, even
		 * tho for the NULL chain these values will never be used.
		 */
		/* FALLTHRU */
	case SD_CHAIN_BUFIO:
		index = un->un_buf_chain_type;
		if ((!un->un_f_has_removable_media) &&
		    (un->un_tgt_blocksize != 0) &&
		    (un->un_tgt_blocksize != DEV_BSIZE ||
		    un->un_f_enable_rmw)) {
			/*
			 * Non-DEV_BSIZE fixed disk (or forced RMW): check
			 * whether this request is misaligned with respect
			 * to the device's block size.
			 */
			int secmask = 0, blknomask = 0;
			if (un->un_f_enable_rmw) {
				blknomask =
				    (un->un_phy_blocksize / DEV_BSIZE) - 1;
				secmask = un->un_phy_blocksize - 1;
			} else {
				blknomask =
				    (un->un_tgt_blocksize / DEV_BSIZE) - 1;
				secmask = un->un_tgt_blocksize - 1;
			}

			if ((bp->b_lblkno & (blknomask)) ||
			    (bp->b_bcount & (secmask))) {
				/*
				 * Misaligned: route through the multi-sector
				 * (read-modify-write) chain, unless the
				 * configuration says to return an error
				 * instead.
				 */
				if ((un->un_f_rmw_type !=
				    SD_RMW_TYPE_RETURN_ERROR) ||
				    un->un_f_enable_rmw) {
					if (un->un_f_pm_is_enabled == FALSE)
						index =
						    SD_CHAIN_INFO_MSS_DSK_NO_PM;
					else
						index =
						    SD_CHAIN_INFO_MSS_DISK;
				}
			}
		}
		break;
	case SD_CHAIN_USCSI:
		index = un->un_uscsi_chain_type;
		break;
	case SD_CHAIN_DIRECT:
		index = un->un_direct_chain_type;
		break;
	case SD_CHAIN_DIRECT_PRIORITY:
		index = un->un_priority_chain_type;
		break;
	default:
		/* We're really broken if we ever get here... */
		panic("sd_xbuf_init: illegal chain type!");
		/*NOTREACHED*/
	}

	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;

	/*
	 * It might be a bit easier to simply bzero the entire xbuf above,
	 * but it turns out that since we init a fair number of members anyway,
	 * we save a fair number cycles by doing explicit assignment of zero.
	 */
	xp->xb_pkt_flags = 0;
	xp->xb_dma_resid = 0;
	xp->xb_retry_count = 0;
	xp->xb_victim_retry_count = 0;
	xp->xb_ua_retry_count = 0;
	xp->xb_nr_retry_count = 0;
	xp->xb_sense_bp = NULL;
	xp->xb_sense_status = 0;
	xp->xb_sense_state = 0;
	xp->xb_sense_resid = 0;
	xp->xb_ena = 0;

	/* Hang the xbuf off the buf and reset the buf's completion state. */
	bp->b_private = xp;
	bp->b_flags &= ~(B_DONE | B_ERROR);
	bp->b_resid = 0;
	bp->av_forw = NULL;
	bp->av_back = NULL;
	bioerror(bp, 0);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
}
11683 11681
11684 11682
/*
 * Function: sd_uscsi_strategy
 *
 * Description: Wrapper for calling into the USCSI chain via physio(9F)
 *
 * Arguments: bp - buf struct ptr
 *
 * Return Code: Always returns 0
 *
 * Context: Kernel thread context
 */

static int
sd_uscsi_strategy(struct buf *bp)
{
	struct sd_lun		*un;
	struct sd_uscsi_info	*uip;
	struct sd_xbuf		*xp;
	uchar_t			chain_type;
	uchar_t			cmd;

	ASSERT(bp != NULL);

	/* No soft state for this instance: fail the request with EIO. */
	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);

	/*
	 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
	 */
	ASSERT(bp->b_private != NULL);
	uip = (struct sd_uscsi_info *)bp->b_private;
	cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];

	mutex_enter(SD_MUTEX(un));
	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin called interrupt context under
	 * the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		/* Drop the mutex around bp_mapin(); it may sleep. */
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	un->un_ncmds_in_driver++;
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * A data-bearing write (other than MODE SELECT) makes a later
	 * SYNCHRONIZE CACHE necessary.
	 */
	if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
	    (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
		un->un_f_sync_cache_required = TRUE;

	mutex_exit(SD_MUTEX(un));

	/* Map the caller's path flag to the corresponding IO chain type. */
	switch (uip->ui_flags) {
	case SD_PATH_DIRECT:
		chain_type = SD_CHAIN_DIRECT;
		break;
	case SD_PATH_DIRECT_PRIORITY:
		chain_type = SD_CHAIN_DIRECT_PRIORITY;
		break;
	default:
		chain_type = SD_CHAIN_USCSI;
		break;
	}

	/*
	 * We may allocate extra buf for external USCSI commands. If the
	 * application asks for bigger than 20-byte sense data via USCSI,
	 * SCSA layer will allocate 252 bytes sense buf for that command.
	 */
	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
	    SENSE_LENGTH) {
		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH, KM_SLEEP);
	} else {
		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
	}

	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);

	/* Use the index obtained within xbuf_init */
	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);

	return (0);
}
11782 11780
11783 11781 /*
11784 11782 * Function: sd_send_scsi_cmd
11785 11783 *
11786 11784 * Description: Runs a USCSI command for user (when called thru sdioctl),
11787 11785 * or for the driver
11788 11786 *
11789 11787 * Arguments: dev - the dev_t for the device
11790 11788 * incmd - ptr to a valid uscsi_cmd struct
11791 11789 * flag - bit flag, indicating open settings, 32/64 bit type
11792 11790 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11793 11791 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11794 11792 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11795 11793 * to use the USCSI "direct" chain and bypass the normal
11796 11794 * command waitq.
11797 11795 *
11798 11796 * Return Code: 0 - successful completion of the given command
11799 11797 * EIO - scsi_uscsi_handle_command() failed
11800 11798 * ENXIO - soft state not found for specified dev
11801 11799 * EINVAL
11802 11800 * EFAULT - copyin/copyout error
11803 11801 * return code of scsi_uscsi_handle_command():
11804 11802 * EIO
11805 11803 * ENXIO
11806 11804 * EACCES
11807 11805 *
11808 11806 * Context: Waits for command to complete. Can sleep.
11809 11807 */
11810 11808
11811 11809 static int
11812 11810 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11813 11811 enum uio_seg dataspace, int path_flag)
11814 11812 {
11815 11813 struct sd_lun *un;
11816 11814 sd_ssc_t *ssc;
11817 11815 int rval;
11818 11816
11819 11817 un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11820 11818 if (un == NULL) {
11821 11819 return (ENXIO);
11822 11820 }
11823 11821
11824 11822 /*
11825 11823 * Using sd_ssc_send to handle uscsi cmd
11826 11824 */
11827 11825 ssc = sd_ssc_init(un);
11828 11826 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11829 11827 sd_ssc_fini(ssc);
11830 11828
11831 11829 return (rval);
11832 11830 }
11833 11831
11834 11832 /*
11835 11833 * Function: sd_ssc_init
11836 11834 *
11837 11835 * Description: Uscsi end-user call this function to initialize necessary
11838 11836 * fields, such as uscsi_cmd and sd_uscsi_info struct.
11839 11837 *
11840 11838 * The return value of sd_send_scsi_cmd will be treated as a
11841 11839 * fault in various conditions. Even it is not Zero, some
11842 11840 * callers may ignore the return value. That is to say, we can
11843 11841 * not make an accurate assessment in sdintr, since if a
11844 11842 * command is failed in sdintr it does not mean the caller of
11845 11843 * sd_send_scsi_cmd will treat it as a real failure.
11846 11844 *
11847 11845 * To avoid printing too many error logs for a failed uscsi
11848 11846 * packet that the caller may not treat it as a failure, the
11849 11847 * sd will keep silent for handling all uscsi commands.
11850 11848 *
11851 11849 * During detach->attach and attach-open, for some types of
11852 11850 * problems, the driver should be providing information about
11853 11851 * the problem encountered. Device use USCSI_SILENT, which
11854 11852 * suppresses all driver information. The result is that no
11855 11853 * information about the problem is available. Being
11856 11854 * completely silent during this time is inappropriate. The
11857 11855 * driver needs a more selective filter than USCSI_SILENT, so
11858 11856 * that information related to faults is provided.
11859 11857 *
 *              To make an accurate assessment, the caller of
11861 11859 * sd_send_scsi_USCSI_CMD should take the ownership and
11862 11860 * get necessary information to print error messages.
11863 11861 *
11864 11862 * If we want to print necessary info of uscsi command, we need to
11865 11863 * keep the uscsi_cmd and sd_uscsi_info till we can make the
11866 11864 * assessment. We use sd_ssc_init to alloc necessary
11867 11865 * structs for sending an uscsi command and we are also
11868 11866 * responsible for free the memory by calling
11869 11867 * sd_ssc_fini.
11870 11868 *
 *              The calling sequences will look like:
11872 11870 * sd_ssc_init->
11873 11871 *
11874 11872 * ...
11875 11873 *
11876 11874 * sd_send_scsi_USCSI_CMD->
11877 11875 * sd_ssc_send-> - - - sdintr
11878 11876 * ...
11879 11877 *
 *                      if we think the return value should be treated as a
 *                      failure, we make the assessment here and print out the
 *                      necessary information by retrieving uscsi_cmd and sd_uscsi_info
11883 11881 *
11884 11882 * ...
11885 11883 *
11886 11884 * sd_ssc_fini
11887 11885 *
11888 11886 *
11889 11887 * Arguments: un - pointer to driver soft state (unit) structure for this
11890 11888 * target.
11891 11889 *
11892 11890 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
11893 11891 * uscsi_cmd and sd_uscsi_info.
11894 11892 * NULL - if can not alloc memory for sd_ssc_t struct
11895 11893 *
11896 11894 * Context: Kernel Thread.
11897 11895 */
11898 11896 static sd_ssc_t *
11899 11897 sd_ssc_init(struct sd_lun *un)
11900 11898 {
11901 11899 sd_ssc_t *ssc;
11902 11900 struct uscsi_cmd *ucmdp;
11903 11901 struct sd_uscsi_info *uip;
11904 11902
11905 11903 ASSERT(un != NULL);
11906 11904 ASSERT(!mutex_owned(SD_MUTEX(un)));
11907 11905
11908 11906 /*
11909 11907 * Allocate sd_ssc_t structure
11910 11908 */
11911 11909 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11912 11910
11913 11911 /*
11914 11912 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11915 11913 */
11916 11914 ucmdp = scsi_uscsi_alloc();
11917 11915
11918 11916 /*
11919 11917 * Allocate sd_uscsi_info structure
11920 11918 */
11921 11919 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11922 11920
11923 11921 ssc->ssc_uscsi_cmd = ucmdp;
11924 11922 ssc->ssc_uscsi_info = uip;
11925 11923 ssc->ssc_un = un;
11926 11924
11927 11925 return (ssc);
11928 11926 }
11929 11927
11930 11928 /*
11931 11929 * Function: sd_ssc_fini
11932 11930 *
11933 11931 * Description: To free sd_ssc_t and it's hanging off
11934 11932 *
11935 11933 * Arguments: ssc - struct pointer of sd_ssc_t.
11936 11934 */
11937 11935 static void
11938 11936 sd_ssc_fini(sd_ssc_t *ssc)
11939 11937 {
11940 11938 scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11941 11939
11942 11940 if (ssc->ssc_uscsi_info != NULL) {
11943 11941 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11944 11942 ssc->ssc_uscsi_info = NULL;
11945 11943 }
11946 11944
11947 11945 kmem_free(ssc, sizeof (sd_ssc_t));
11948 11946 ssc = NULL;
11949 11947 }
11950 11948
11951 11949 /*
11952 11950 * Function: sd_ssc_send
11953 11951 *
11954 11952 * Description: Runs a USCSI command for user when called through sdioctl,
11955 11953 * or for the driver.
11956 11954 *
11957 11955 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
11958 11956 * sd_uscsi_info in.
11959 11957 * incmd - ptr to a valid uscsi_cmd struct
11960 11958 * flag - bit flag, indicating open settings, 32/64 bit type
11961 11959 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11962 11960 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11963 11961 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11964 11962 * to use the USCSI "direct" chain and bypass the normal
11965 11963 * command waitq.
11966 11964 *
11967 11965 * Return Code: 0 - successful completion of the given command
11968 11966 * EIO - scsi_uscsi_handle_command() failed
11969 11967 * ENXIO - soft state not found for specified dev
11970 11968 * ECANCELED - command cancelled due to low power
11971 11969 * EINVAL
11972 11970 * EFAULT - copyin/copyout error
11973 11971 * return code of scsi_uscsi_handle_command():
11974 11972 * EIO
11975 11973 * ENXIO
11976 11974 * EACCES
11977 11975 *
11978 11976 * Context: Kernel Thread;
11979 11977 * Waits for command to complete. Can sleep.
11980 11978 */
11981 11979 static int
11982 11980 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
11983 11981 enum uio_seg dataspace, int path_flag)
11984 11982 {
11985 11983 struct sd_uscsi_info *uip;
11986 11984 struct uscsi_cmd *uscmd;
11987 11985 struct sd_lun *un;
11988 11986 dev_t dev;
11989 11987
11990 11988 int format = 0;
11991 11989 int rval;
11992 11990
11993 11991 ASSERT(ssc != NULL);
11994 11992 un = ssc->ssc_un;
11995 11993 ASSERT(un != NULL);
11996 11994 uscmd = ssc->ssc_uscsi_cmd;
11997 11995 ASSERT(uscmd != NULL);
11998 11996 ASSERT(!mutex_owned(SD_MUTEX(un)));
11999 11997 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12000 11998 /*
12001 11999 * If enter here, it indicates that the previous uscsi
12002 12000 * command has not been processed by sd_ssc_assessment.
12003 12001 * This is violating our rules of FMA telemetry processing.
12004 12002 * We should print out this message and the last undisposed
12005 12003 * uscsi command.
12006 12004 */
12007 12005 if (uscmd->uscsi_cdb != NULL) {
12008 12006 SD_INFO(SD_LOG_SDTEST, un,
12009 12007 "sd_ssc_send is missing the alternative "
12010 12008 "sd_ssc_assessment when running command 0x%x.\n",
12011 12009 uscmd->uscsi_cdb[0]);
12012 12010 }
12013 12011 /*
12014 12012 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
12015 12013 * the initial status.
12016 12014 */
12017 12015 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12018 12016 }
12019 12017
12020 12018 /*
12021 12019 * We need to make sure sd_ssc_send will have sd_ssc_assessment
12022 12020 * followed to avoid missing FMA telemetries.
12023 12021 */
12024 12022 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
12025 12023
12026 12024 /*
12027 12025 * if USCSI_PMFAILFAST is set and un is in low power, fail the
12028 12026 * command immediately.
12029 12027 */
12030 12028 mutex_enter(SD_MUTEX(un));
12031 12029 mutex_enter(&un->un_pm_mutex);
12032 12030 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) &&
12033 12031 SD_DEVICE_IS_IN_LOW_POWER(un)) {
12034 12032 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:"
12035 12033 "un:0x%p is in low power\n", un);
12036 12034 mutex_exit(&un->un_pm_mutex);
12037 12035 mutex_exit(SD_MUTEX(un));
12038 12036 return (ECANCELED);
12039 12037 }
12040 12038 mutex_exit(&un->un_pm_mutex);
12041 12039 mutex_exit(SD_MUTEX(un));
12042 12040
12043 12041 #ifdef SDDEBUG
12044 12042 switch (dataspace) {
12045 12043 case UIO_USERSPACE:
12046 12044 SD_TRACE(SD_LOG_IO, un,
12047 12045 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un);
12048 12046 break;
12049 12047 case UIO_SYSSPACE:
12050 12048 SD_TRACE(SD_LOG_IO, un,
12051 12049 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un);
12052 12050 break;
12053 12051 default:
12054 12052 SD_TRACE(SD_LOG_IO, un,
12055 12053 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un);
12056 12054 break;
12057 12055 }
12058 12056 #endif
12059 12057
12060 12058 rval = scsi_uscsi_copyin((intptr_t)incmd, flag,
12061 12059 SD_ADDRESS(un), &uscmd);
12062 12060 if (rval != 0) {
12063 12061 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: "
12064 12062 "scsi_uscsi_alloc_and_copyin failed\n", un);
12065 12063 return (rval);
12066 12064 }
12067 12065
12068 12066 if ((uscmd->uscsi_cdb != NULL) &&
12069 12067 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
12070 12068 mutex_enter(SD_MUTEX(un));
12071 12069 un->un_f_format_in_progress = TRUE;
12072 12070 mutex_exit(SD_MUTEX(un));
12073 12071 format = 1;
12074 12072 }
12075 12073
12076 12074 /*
12077 12075 * Allocate an sd_uscsi_info struct and fill it with the info
12078 12076 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
12079 12077 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
12080 12078 * since we allocate the buf here in this function, we do not
12081 12079 * need to preserve the prior contents of b_private.
12082 12080 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
12083 12081 */
12084 12082 uip = ssc->ssc_uscsi_info;
12085 12083 uip->ui_flags = path_flag;
12086 12084 uip->ui_cmdp = uscmd;
12087 12085
12088 12086 /*
12089 12087 * Commands sent with priority are intended for error recovery
12090 12088 * situations, and do not have retries performed.
12091 12089 */
12092 12090 if (path_flag == SD_PATH_DIRECT_PRIORITY) {
12093 12091 uscmd->uscsi_flags |= USCSI_DIAGNOSE;
12094 12092 }
12095 12093 uscmd->uscsi_flags &= ~USCSI_NOINTR;
12096 12094
12097 12095 dev = SD_GET_DEV(un);
12098 12096 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
12099 12097 sd_uscsi_strategy, NULL, uip);
12100 12098
12101 12099 /*
12102 12100 * mark ssc_flags right after handle_cmd to make sure
12103 12101 * the uscsi has been sent
12104 12102 */
12105 12103 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED;
12106 12104
12107 12105 #ifdef SDDEBUG
12108 12106 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12109 12107 "uscsi_status: 0x%02x uscsi_resid:0x%x\n",
12110 12108 uscmd->uscsi_status, uscmd->uscsi_resid);
12111 12109 if (uscmd->uscsi_bufaddr != NULL) {
12112 12110 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12113 12111 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n",
12114 12112 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
12115 12113 if (dataspace == UIO_SYSSPACE) {
12116 12114 SD_DUMP_MEMORY(un, SD_LOG_IO,
12117 12115 "data", (uchar_t *)uscmd->uscsi_bufaddr,
12118 12116 uscmd->uscsi_buflen, SD_LOG_HEX);
12119 12117 }
12120 12118 }
12121 12119 #endif
12122 12120
12123 12121 if (format == 1) {
12124 12122 mutex_enter(SD_MUTEX(un));
12125 12123 un->un_f_format_in_progress = FALSE;
12126 12124 mutex_exit(SD_MUTEX(un));
12127 12125 }
12128 12126
12129 12127 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd);
12130 12128
12131 12129 return (rval);
12132 12130 }
12133 12131
12134 12132 /*
12135 12133 * Function: sd_ssc_print
12136 12134 *
12137 12135 * Description: Print information available to the console.
12138 12136 *
12139 12137 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12140 12138 * sd_uscsi_info in.
12141 12139 * sd_severity - log level.
12142 12140 * Context: Kernel thread or interrupt context.
12143 12141 */
12144 12142 static void
12145 12143 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12146 12144 {
12147 12145 struct uscsi_cmd *ucmdp;
12148 12146 struct scsi_device *devp;
12149 12147 dev_info_t *devinfo;
12150 12148 uchar_t *sensep;
12151 12149 int senlen;
12152 12150 union scsi_cdb *cdbp;
12153 12151 uchar_t com;
12154 12152 extern struct scsi_key_strings scsi_cmds[];
12155 12153
12156 12154 ASSERT(ssc != NULL);
12157 12155 ASSERT(ssc->ssc_un != NULL);
12158 12156
12159 12157 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12160 12158 return;
12161 12159 ucmdp = ssc->ssc_uscsi_cmd;
12162 12160 devp = SD_SCSI_DEVP(ssc->ssc_un);
12163 12161 devinfo = SD_DEVINFO(ssc->ssc_un);
12164 12162 ASSERT(ucmdp != NULL);
12165 12163 ASSERT(devp != NULL);
12166 12164 ASSERT(devinfo != NULL);
12167 12165 sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12168 12166 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12169 12167 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12170 12168
12171 12169 /* In certain case (like DOORLOCK), the cdb could be NULL. */
12172 12170 if (cdbp == NULL)
12173 12171 return;
12174 12172 /* We don't print log if no sense data available. */
12175 12173 if (senlen == 0)
12176 12174 sensep = NULL;
12177 12175 com = cdbp->scc_cmd;
12178 12176 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12179 12177 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12180 12178 }
12181 12179
12182 12180 /*
12183 12181 * Function: sd_ssc_assessment
12184 12182 *
12185 12183 * Description: We use this function to make an assessment at the point
12186 12184 * where SD driver may encounter a potential error.
12187 12185 *
12188 12186 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12189 12187 * sd_uscsi_info in.
12190 12188 * tp_assess - a hint of strategy for ereport posting.
12191 12189 * Possible values of tp_assess include:
12192 12190 * SD_FMT_IGNORE - we don't post any ereport because we're
12193 12191 * sure that it is ok to ignore the underlying problems.
12194 12192 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now
12195 12193 * but it might be not correct to ignore the underlying hardware
12196 12194 * error.
12197 12195 * SD_FMT_STATUS_CHECK - we will post an ereport with the
12198 12196 * payload driver-assessment of value "fail" or
12199 12197 * "fatal"(depending on what information we have here). This
12219› 12200 12198 * assessment value is usually set when the SD driver thinks
12220› 12201 12199 * a potential error has occurred (typically, when the return value
12202 12200 * of the SCSI command is EIO).
12203 12201 * SD_FMT_STANDARD - we will post an ereport with the payload
12204 12202 * driver-assessment of value "info". This assessment value is
12205 12203 * set when the SCSI command returned successfully and with
12206 12204 * sense data sent back.
12207 12205 *
12208 12206 * Context: Kernel thread.
12209 12207 */
12210 12208 static void
12211 12209 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12212 12210 {
12213 12211 int senlen = 0;
12214 12212 struct uscsi_cmd *ucmdp = NULL;
12215 12213 struct sd_lun *un;
12216 12214
12217 12215 ASSERT(ssc != NULL);
12218 12216 un = ssc->ssc_un;
12219 12217 ASSERT(un != NULL);
12220 12218 ucmdp = ssc->ssc_uscsi_cmd;
12221 12219 ASSERT(ucmdp != NULL);
12222 12220
12223 12221 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12224 12222 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12225 12223 } else {
12226 12224 /*
12227 12225 * If enter here, it indicates that we have a wrong
12228 12226 * calling sequence of sd_ssc_send and sd_ssc_assessment,
12229 12227 * both of which should be called in a pair in case of
12230 12228 * loss of FMA telemetries.
12231 12229 */
12232 12230 if (ucmdp->uscsi_cdb != NULL) {
12233 12231 SD_INFO(SD_LOG_SDTEST, un,
12234 12232 "sd_ssc_assessment is missing the "
12235 12233 "alternative sd_ssc_send when running 0x%x, "
12236 12234 "or there are superfluous sd_ssc_assessment for "
12237 12235 "the same sd_ssc_send.\n",
12238 12236 ucmdp->uscsi_cdb[0]);
12239 12237 }
12240 12238 /*
12241 12239 * Set the ssc_flags to the initial value to avoid passing
12242 12240 * down dirty flags to the following sd_ssc_send function.
12243 12241 */
12244 12242 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12245 12243 return;
12246 12244 }
12247 12245
12248 12246 /*
12249 12247 * Only handle an issued command which is waiting for assessment.
12250 12248 * A command which is not issued will not have
12270› 12251 12249 * SSC_FLAGS_INVALID_DATA set, so it's ok to just return here.
12252 12250 */
12253 12251 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12254 12252 sd_ssc_print(ssc, SCSI_ERR_INFO);
12255 12253 return;
12256 12254 } else {
12257 12255 /*
12277› 12258 12256 * For an issued command, we should clear this flag in
12278› 12259 12257 * order to allow the sd_ssc_t structure to be reused
12279› 12260 12258 * across multiple uscsi commands.
12261 12259 */
12262 12260 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12263 12261 }
12264 12262
12265 12263 /*
12266 12264 * We will not deal with non-retryable(flag USCSI_DIAGNOSE set)
12267 12265 * commands here. And we should clear the ssc_flags before return.
12268 12266 */
12269 12267 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12270 12268 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12271 12269 return;
12272 12270 }
12273 12271
12274 12272 switch (tp_assess) {
12275 12273 case SD_FMT_IGNORE:
12276 12274 case SD_FMT_IGNORE_COMPROMISE:
12277 12275 break;
12278 12276 case SD_FMT_STATUS_CHECK:
12279 12277 /*
12280 12278 * For a failed command(including the succeeded command
12281 12279 * with invalid data sent back).
12282 12280 */
12283 12281 sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12284 12282 break;
12285 12283 case SD_FMT_STANDARD:
12286 12284 /*
12287 12285 * Always for the succeeded commands probably with sense
12288 12286 * data sent back.
12289 12287 * Limitation:
12290 12288 * We can only handle a succeeded command with sense
12291 12289 * data sent back when auto-request-sense is enabled.
12292 12290 */
12293 12291 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12294 12292 ssc->ssc_uscsi_cmd->uscsi_rqresid;
12295 12293 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12296 12294 (un->un_f_arq_enabled == TRUE) &&
12297 12295 senlen > 0 &&
12298 12296 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12299 12297 sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12300 12298 }
12301 12299 break;
12302 12300 default:
12303 12301 /*
12304 12302 * Should not have other type of assessment.
12305 12303 */
12306 12304 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12307 12305 "sd_ssc_assessment got wrong "
12308 12306 "sd_type_assessment %d.\n", tp_assess);
12309 12307 break;
12310 12308 }
12311 12309 /*
12312 12310 * Clear up the ssc_flags before return.
12313 12311 */
12314 12312 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12315 12313 }
12316 12314
12317 12315 /*
12318 12316 * Function: sd_ssc_post
12319 12317 *
12320 12318 * Description: 1. read the driver property to get fm-scsi-log flag.
12321 12319 * 2. print log if fm_log_capable is non-zero.
12322 12320 * 3. call sd_ssc_ereport_post to post ereport if possible.
12323 12321 *
12324 12322 * Context: May be called from kernel thread or interrupt context.
12325 12323 */
12326 12324 static void
12327 12325 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12328 12326 {
12329 12327 struct sd_lun *un;
12330 12328 int sd_severity;
12331 12329
12332 12330 ASSERT(ssc != NULL);
12333 12331 un = ssc->ssc_un;
12334 12332 ASSERT(un != NULL);
12335 12333
12336 12334 /*
12337 12335 * We may enter here from sd_ssc_assessment(for USCSI command) or
12338 12336 * by directly called from sdintr context.
12339 12337 * We don't handle a non-disk drive(CD-ROM, removable media).
12340 12338 * Clear the ssc_flags before return in case we've set
12341 12339 * SSC_FLAGS_INVALID_XXX which should be skipped for a non-disk
12342 12340 * driver.
12343 12341 */
12344 12342 if (ISCD(un) || un->un_f_has_removable_media) {
12345 12343 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12346 12344 return;
12347 12345 }
12348 12346
12349 12347 switch (sd_assess) {
12350 12348 case SD_FM_DRV_FATAL:
12351 12349 sd_severity = SCSI_ERR_FATAL;
12352 12350 break;
12353 12351 case SD_FM_DRV_RECOVERY:
12354 12352 sd_severity = SCSI_ERR_RECOVERED;
12355 12353 break;
12356 12354 case SD_FM_DRV_RETRY:
12357 12355 sd_severity = SCSI_ERR_RETRYABLE;
12358 12356 break;
12359 12357 case SD_FM_DRV_NOTICE:
12360 12358 sd_severity = SCSI_ERR_INFO;
12361 12359 break;
12362 12360 default:
12363 12361 sd_severity = SCSI_ERR_UNKNOWN;
12364 12362 }
12365 12363 /* print log */
12366 12364 sd_ssc_print(ssc, sd_severity);
12367 12365
12368 12366 /* always post ereport */
12369 12367 sd_ssc_ereport_post(ssc, sd_assess);
12370 12368 }
12371 12369
12372 12370 /*
12373 12371 * Function: sd_ssc_set_info
12374 12372 *
12375 12373 * Description: Mark ssc_flags and set ssc_info which would be the
12376 12374 * payload of uderr ereport. This function will cause
12377 12375 * sd_ssc_ereport_post to post uderr ereport only.
12378 12376 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI),
12379 12377 * the function will also call SD_ERROR or scsi_log for a
12380 12378 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device.
12381 12379 *
12382 12380 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12383 12381 * sd_uscsi_info in.
12384 12382 * ssc_flags - indicate the sub-category of a uderr.
12385 12383 * comp - this argument is meaningful only when
12386 12384 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible
12387 12385 * values include:
12388 12386 * > 0, SD_ERROR is used with comp as the driver logging
12389 12387 * component;
12390 12388 * = 0, scsi-log is used to log error telemetries;
12391 12389 * < 0, no log available for this telemetry.
12392 12390 *
12393 12391 * Context: Kernel thread or interrupt context
12394 12392 */
12395 12393 static void
12396 12394 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
12397 12395 {
12398 12396 va_list ap;
12399 12397
12400 12398 ASSERT(ssc != NULL);
12401 12399 ASSERT(ssc->ssc_un != NULL);
12402 12400
12403 12401 ssc->ssc_flags |= ssc_flags;
12404 12402 va_start(ap, fmt);
12405 12403 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
12406 12404 va_end(ap);
12407 12405
12408 12406 /*
12409 12407 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command
12410 12408 * with invalid data sent back. For non-uscsi command, the
12411 12409 * following code will be bypassed.
12412 12410 */
12413 12411 if (ssc_flags & SSC_FLAGS_INVALID_DATA) {
12414 12412 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) {
12415 12413 /*
12416 12414 * If the error belong to certain component and we
12417 12415 * do not want it to show up on the console, we
12418 12416 * will use SD_ERROR, otherwise scsi_log is
12419 12417 * preferred.
12420 12418 */
12421 12419 if (comp > 0) {
12422 12420 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info);
12423 12421 } else if (comp == 0) {
12424 12422 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label,
12425 12423 CE_WARN, ssc->ssc_info);
12426 12424 }
12427 12425 }
12428 12426 }
12429 12427 }
12430 12428
12431 12429 /*
12432 12430 * Function: sd_buf_iodone
12433 12431 *
12434 12432 * Description: Frees the sd_xbuf & returns the buf to its originator.
12435 12433 *
12436 12434 * Context: May be called from interrupt context.
12437 12435 */
12438 12436 /* ARGSUSED */
12439 12437 static void
12440 12438 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
12441 12439 {
12442 12440 struct sd_xbuf *xp;
12443 12441
12444 12442 ASSERT(un != NULL);
12445 12443 ASSERT(bp != NULL);
12446 12444 ASSERT(!mutex_owned(SD_MUTEX(un)));
12447 12445
12448 12446 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
12449 12447
12450 12448 xp = SD_GET_XBUF(bp);
12451 12449 ASSERT(xp != NULL);
12452 12450
12453 12451 /* xbuf is gone after this */
↓ open down ↓ |
5880 lines elided |
↑ open up ↑ |
12454 12452 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) {
12455 12453 mutex_enter(SD_MUTEX(un));
12456 12454
12457 12455 /*
12458 12456 * Grab time when the cmd completed.
12459 12457 * This is used for determining if the system has been
12460 12458 * idle long enough to make it idle to the PM framework.
12461 12459 * This is for lowering the overhead, and therefore improving
12462 12460 * performance per I/O operation.
12463 12461 */
12464 - un->un_pm_idle_time = ddi_get_time();
12462 + un->un_pm_idle_time = gethrtime();
12465 12463
12466 12464 un->un_ncmds_in_driver--;
12467 12465 ASSERT(un->un_ncmds_in_driver >= 0);
12468 12466 SD_INFO(SD_LOG_IO, un,
12469 12467 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12470 12468 un->un_ncmds_in_driver);
12471 12469
12472 12470 mutex_exit(SD_MUTEX(un));
12473 12471 }
12474 12472
12475 12473 biodone(bp); /* bp is gone after this */
12476 12474
12477 12475 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12478 12476 }
12479 12477
12480 12478
12481 12479 /*
12482 12480 * Function: sd_uscsi_iodone
12483 12481 *
12484 12482 * Description: Frees the sd_xbuf & returns the buf to its originator.
12485 12483 *
12486 12484 * Context: May be called from interrupt context.
12487 12485 */
12488 12486 /* ARGSUSED */
12489 12487 static void
12490 12488 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12491 12489 {
12492 12490 struct sd_xbuf *xp;
12493 12491
12494 12492 ASSERT(un != NULL);
12495 12493 ASSERT(bp != NULL);
12496 12494
12497 12495 xp = SD_GET_XBUF(bp);
12498 12496 ASSERT(xp != NULL);
12499 12497 ASSERT(!mutex_owned(SD_MUTEX(un)));
12500 12498
12501 12499 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12502 12500
12503 12501 bp->b_private = xp->xb_private;
↓ open down ↓ |
29 lines elided |
↑ open up ↑ |
12504 12502
12505 12503 mutex_enter(SD_MUTEX(un));
12506 12504
12507 12505 /*
12508 12506 * Grab time when the cmd completed.
12509 12507 * This is used for determining if the system has been
12510 12508 * idle long enough to make it idle to the PM framework.
12511 12509 * This is for lowering the overhead, and therefore improving
12512 12510 * performance per I/O operation.
12513 12511 */
12514 - un->un_pm_idle_time = ddi_get_time();
12512 + un->un_pm_idle_time = gethrtime();
12515 12513
12516 12514 un->un_ncmds_in_driver--;
12517 12515 ASSERT(un->un_ncmds_in_driver >= 0);
12518 12516 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12519 12517 un->un_ncmds_in_driver);
12520 12518
12521 12519 mutex_exit(SD_MUTEX(un));
12522 12520
12523 12521 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12524 12522 SENSE_LENGTH) {
12525 12523 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12526 12524 MAX_SENSE_LENGTH);
12527 12525 } else {
12528 12526 kmem_free(xp, sizeof (struct sd_xbuf));
12529 12527 }
12530 12528
12531 12529 biodone(bp);
12532 12530
12533 12531 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12534 12532 }
12535 12533
12536 12534
12537 12535 /*
12538 12536 * Function: sd_mapblockaddr_iostart
12539 12537 *
12540 12538 * Description: Verify request lies within the partition limits for
12541 12539 * the indicated minor device. Issue "overrun" buf if
12542 12540 * request would exceed partition range. Converts
12543 12541 * partition-relative block address to absolute.
12544 12542 *
12545 12543 * Upon exit of this function:
12546 12544 * 1.I/O is aligned
12547 12545 * xp->xb_blkno represents the absolute sector address
12548 12546 * 2.I/O is misaligned
12549 12547 * xp->xb_blkno represents the absolute logical block address
12550 12548 * based on DEV_BSIZE. The logical block address will be
12551 12549 * converted to physical sector address in sd_mapblocksize_\
12552 12550 * iostart.
12553 12551 * 3.I/O is misaligned but is aligned in "overrun" buf
12554 12552 * xp->xb_blkno represents the absolute logical block address
12555 12553 * based on DEV_BSIZE. The logical block address will be
12556 12554 * converted to physical sector address in sd_mapblocksize_\
12557 12555 * iostart. But no RMW will be issued in this case.
12558 12556 *
12559 12557 * Context: Can sleep
12560 12558 *
12561 12559 * Issues: This follows what the old code did, in terms of accessing
12562 12560 * some of the partition info in the unit struct without holding
12590› 12563 12561 * the mutex. This is a general issue, if the partition info
12564 12562 * can be altered while IO is in progress... as soon as we send
12565 12563 * a buf, its partitioning can be invalid before it gets to the
12566 12564 * device. Probably the right fix is to move partitioning out
12567 12565 * of the driver entirely.
12568 12566 */
12569 12567
12570 12568 static void
12571 12569 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12572 12570 {
12573 12571 diskaddr_t nblocks; /* #blocks in the given partition */
12574 12572 daddr_t blocknum; /* Block number specified by the buf */
12575 12573 size_t requested_nblocks;
12576 12574 size_t available_nblocks;
12577 12575 int partition;
12578 12576 diskaddr_t partition_offset;
12579 12577 struct sd_xbuf *xp;
12580 12578 int secmask = 0, blknomask = 0;
12581 12579 ushort_t is_aligned = TRUE;
12582 12580
12583 12581 ASSERT(un != NULL);
12584 12582 ASSERT(bp != NULL);
12585 12583 ASSERT(!mutex_owned(SD_MUTEX(un)));
12586 12584
12587 12585 SD_TRACE(SD_LOG_IO_PARTITION, un,
12588 12586 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12589 12587
12590 12588 xp = SD_GET_XBUF(bp);
12591 12589 ASSERT(xp != NULL);
12592 12590
12593 12591 /*
12594 12592 * If the geometry is not indicated as valid, attempt to access
12595 12593 * the unit & verify the geometry/label. This can be the case for
12623› 12596 12594 * removable-media devices, or if the device was opened in
12597 12595 * NDELAY/NONBLOCK mode.
12598 12596 */
12599 12597 partition = SDPART(bp->b_edev);
12600 12598
12601 12599 if (!SD_IS_VALID_LABEL(un)) {
12602 12600 sd_ssc_t *ssc;
12603 12601 /*
12604 12602 * Initialize sd_ssc_t for internal uscsi commands
12632› 12605 12603 * In case of potential performance issue, we need
12606 12604 * to alloc memory only if there is invalid label
12607 12605 */
12608 12606 ssc = sd_ssc_init(un);
12609 12607
12610 12608 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12611 12609 /*
12612 12610 * For removable devices it is possible to start an
12613 12611 * I/O without a media by opening the device in nodelay
12614 12612 * mode. Also for writable CDs there can be many
12615 12613 * scenarios where there is no geometry yet but volume
12616 12614 * manager is trying to issue a read() just because
12617 12615 * it can see TOC on the CD. So do not print a message
12618 12616 * for removables.
12619 12617 */
12620 12618 if (!un->un_f_has_removable_media) {
12621 12619 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12622 12620 "i/o to invalid geometry\n");
12623 12621 }
12624 12622 bioerror(bp, EIO);
12625 12623 bp->b_resid = bp->b_bcount;
12626 12624 SD_BEGIN_IODONE(index, un, bp);
12627 12625
12628 12626 sd_ssc_fini(ssc);
12629 12627 return;
12630 12628 }
12631 12629 sd_ssc_fini(ssc);
12632 12630 }
12633 12631
12634 12632 nblocks = 0;
12635 12633 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
12636 12634 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12637 12635
12638 12636 if (un->un_f_enable_rmw) {
12639 12637 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12640 12638 secmask = un->un_phy_blocksize - 1;
12641 12639 } else {
12642 12640 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12643 12641 secmask = un->un_tgt_blocksize - 1;
12644 12642 }
12645 12643
12646 12644 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12647 12645 is_aligned = FALSE;
12648 12646 }
12649 12647
12650 12648 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12651 12649 /*
12652 12650 * If I/O is aligned, no need to involve RMW(Read Modify Write)
12653 12651 * Convert the logical block number to target's physical sector
12654 12652 * number.
12655 12653 */
12656 12654 if (is_aligned) {
12657 12655 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12658 12656 } else {
12659 12657 /*
12660 12658 * There is no RMW if we're just reading, so don't
12661 12659 * warn or error out because of it.
12662 12660 */
12663 12661 if (bp->b_flags & B_READ) {
12664 12662 /*EMPTY*/
12665 12663 } else if (!un->un_f_enable_rmw &&
12666 12664 un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) {
12667 12665 bp->b_flags |= B_ERROR;
12668 12666 goto error_exit;
12669 12667 } else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) {
12670 12668 mutex_enter(SD_MUTEX(un));
12671 12669 if (!un->un_f_enable_rmw &&
12672 12670 un->un_rmw_msg_timeid == NULL) {
12673 12671 scsi_log(SD_DEVINFO(un), sd_label,
12674 12672 CE_WARN, "I/O request is not "
12675 12673 "aligned with %d disk sector size. "
12676 12674 "It is handled through Read Modify "
12677 12675 "Write but the performance is "
12678 12676 "very low.\n",
12679 12677 un->un_tgt_blocksize);
12680 12678 un->un_rmw_msg_timeid =
12681 12679 timeout(sd_rmw_msg_print_handler,
12682 12680 un, SD_RMW_MSG_PRINT_TIMEOUT);
12683 12681 } else {
12684 12682 un->un_rmw_incre_count ++;
12685 12683 }
12686 12684 mutex_exit(SD_MUTEX(un));
12687 12685 }
12688 12686
12689 12687 nblocks = SD_TGT2SYSBLOCK(un, nblocks);
12690 12688 partition_offset = SD_TGT2SYSBLOCK(un,
12691 12689 partition_offset);
12692 12690 }
12693 12691 }
12694 12692
12695 12693 /*
12696 12694 * blocknum is the starting block number of the request. At this
12697 12695 * point it is still relative to the start of the minor device.
12698 12696 */
12699 12697 blocknum = xp->xb_blkno;
12700 12698
12701 12699 /*
12702 12700 * Legacy: If the starting block number is one past the last block
12703 12701 * in the partition, do not set B_ERROR in the buf.
12704 12702 */
12705 12703 if (blocknum == nblocks) {
12706 12704 goto error_exit;
12707 12705 }
12708 12706
12709 12707 /*
12710 12708 * Confirm that the first block of the request lies within the
12711 12709 * partition limits. Also the requested number of bytes must be
12712 12710 * a multiple of the system block size.
12713 12711 */
12714 12712 if ((blocknum < 0) || (blocknum >= nblocks) ||
12715 12713 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
12716 12714 bp->b_flags |= B_ERROR;
12717 12715 goto error_exit;
12718 12716 }
12719 12717
12720 12718 /*
12748› 12721 12719 * If the requested # blocks exceeds the available # blocks, that
12722 12720 * is an overrun of the partition.
12723 12721 */
12724 12722 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12725 12723 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12726 12724 } else {
12727 12725 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
12728 12726 }
12729 12727
12730 12728 available_nblocks = (size_t)(nblocks - blocknum);
12731 12729 ASSERT(nblocks >= blocknum);
12732 12730
12733 12731 if (requested_nblocks > available_nblocks) {
12734 12732 size_t resid;
12735 12733
12736 12734 /*
12737 12735 * Allocate an "overrun" buf to allow the request to proceed
12738 12736 * for the amount of space available in the partition. The
12739 12737 * amount not transferred will be added into the b_resid
12740 12738 * when the operation is complete. The overrun buf
12741 12739 * replaces the original buf here, and the original buf
12742 12740 * is saved inside the overrun buf, for later use.
12743 12741 */
12744 12742 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12745 12743 resid = SD_TGTBLOCKS2BYTES(un,
12746 12744 (offset_t)(requested_nblocks - available_nblocks));
12747 12745 } else {
12748 12746 resid = SD_SYSBLOCKS2BYTES(
12749 12747 (offset_t)(requested_nblocks - available_nblocks));
12750 12748 }
12751 12749
12752 12750 size_t count = bp->b_bcount - resid;
12753 12751 /*
12754 12752 * Note: count is an unsigned entity thus it'll NEVER
12755 12753 * be less than 0 so ASSERT the original values are
12756 12754 * correct.
12757 12755 */
12758 12756 ASSERT(bp->b_bcount >= resid);
12759 12757
12760 12758 bp = sd_bioclone_alloc(bp, count, blocknum,
12761 12759 (int (*)(struct buf *)) sd_mapblockaddr_iodone);
12762 12760 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
12763 12761 ASSERT(xp != NULL);
12764 12762 }
12765 12763
12766 12764 /* At this point there should be no residual for this buf. */
12767 12765 ASSERT(bp->b_resid == 0);
12768 12766
12769 12767 /* Convert the block number to an absolute address. */
12770 12768 xp->xb_blkno += partition_offset;
12771 12769
12772 12770 SD_NEXT_IOSTART(index, un, bp);
12773 12771
12774 12772 SD_TRACE(SD_LOG_IO_PARTITION, un,
12775 12773 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
12776 12774
12777 12775 return;
12778 12776
12779 12777 error_exit:
12780 12778 bp->b_resid = bp->b_bcount;
12781 12779 SD_BEGIN_IODONE(index, un, bp);
12782 12780 SD_TRACE(SD_LOG_IO_PARTITION, un,
12783 12781 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
12784 12782 }
12785 12783
12786 12784
/*
 * Function: sd_mapblockaddr_iodone
 *
 * Description: Completion-side processing for partition management.
 *		If sd_mapblockaddr_iostart() substituted an "overrun" clone
 *		for the original buf, this unwinds that substitution:
 *		propagates resid/error to the original buf, frees the clone,
 *		and continues the iodone chain with the original buf.
 *
 * Arguments: index - index into the layering chain, passed to
 *			SD_NEXT_IODONE()
 *		un - softstate for the target device/LUN
 *		bp - buf being completed; may be an overrun clone whose
 *			b_iodone points back at this function
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	/* int partition; */	/* Not used, see below. */
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);

	/*
	 * The overrun clone is identified by its b_iodone pointing at this
	 * very function (set by sd_bioclone_alloc() in the iostart side).
	 */
	if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
		/*
		 * We have an "overrun" buf to deal with...
		 */
		struct sd_xbuf	*xp;
		struct buf	*obp;	/* ptr to the original buf */

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

		/* Retrieve the pointer to the original buf */
		obp = (struct buf *)xp->xb_private;
		ASSERT(obp != NULL);

		/*
		 * The original request's residual is everything it asked for
		 * minus what the (shortened) clone actually transferred.
		 */
		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
		bioerror(obp, bp->b_error);

		sd_bioclone_free(bp);

		/*
		 * Get back the original buf.
		 * Note that since the restoration of xb_blkno below
		 * was removed, the sd_xbuf is not needed.
		 */
		bp = obp;
		/*
		 * xp = SD_GET_XBUF(bp);
		 * ASSERT(xp != NULL);
		 */
	}

	/*
	 * Convert sd->xb_blkno back to a minor-device relative value.
	 * Note: this has been commented out, as it is not needed in the
	 * current implementation of the driver (ie, since this function
	 * is at the top of the layering chains, so the info will be
	 * discarded) and it is in the "hot" IO path.
	 *
	 * partition = getminor(bp->b_edev) & SDPART_MASK;
	 * xp->xb_blkno -= un->un_offset[partition];
	 */

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
}
12853 12851
12854 12852
/*
 * Function: sd_mapblocksize_iostart
 *
 * Description: Convert between system block size (un->un_sys_blocksize)
 *		and target block size (un->un_tgt_blocksize).
 *
 * Arguments: index - index into the layering chain, passed to
 *			SD_NEXT_IOSTART()/SD_BEGIN_IODONE()
 *		un - softstate for the target device/LUN
 *		bp - buf describing the IO request; xp->xb_blkno is already
 *			absolute (see Assumptions below)
 *
 * Context: Can sleep to allocate resources.
 *
 * Assumptions: A higher layer has already performed any partition validation,
 *		and converted the xp->xb_blkno to an absolute value relative
 *		to the start of the device.
 *
 *		It is also assumed that the higher layer has implemented
 *		an "overrun" mechanism for the case where the request would
 *		read/write beyond the end of a partition.  In this case we
 *		assume (and ASSERT) that bp->b_resid == 0.
 *
 *		Note: The implementation for this routine assumes the target
 *		block size remains constant between allocation and transport.
 */

static void
sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf			*xp;
	offset_t first_byte;
	daddr_t	start_block, end_block;
	daddr_t	request_bytes;
	ushort_t is_aligned = FALSE;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);

	/*
	 * For a non-writable CD, a write request is an error
	 */
	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
	    (un->un_f_mmc_writable_media == FALSE)) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	/*
	 * We do not need a shadow buf if the device is using
	 * un->un_sys_blocksize as its block size or if bcount == 0.
	 * In this case there is no layer-private data block allocated.
	 */
	if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
	    (bp->b_bcount == 0)) {
		goto done;
	}

#if defined(__i386) || defined(__amd64)
	/* We do not support non-block-aligned transfers for ROD devices */
	ASSERT(!ISROD(un));
#endif

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
	    un->un_tgt_blocksize, DEV_BSIZE);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request len:0x%x\n", bp->b_bcount);

	/*
	 * Allocate the layer-private data area for the mapblocksize layer.
	 * Layers are allowed to use the xp_private member of the sd_xbuf
	 * struct to store the pointer to their layer-private data block, but
	 * each layer also has the responsibility of restoring the prior
	 * contents of xb_private before returning the buf/xbuf to the
	 * higher layer that sent it.
	 *
	 * Here we save the prior contents of xp->xb_private into the
	 * bsp->mbs_oprivate field of our layer-private data area. This value
	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
	 * the layer-private area and returning the buf/xbuf to the layer
	 * that sent it.
	 *
	 * Note that here we use kmem_zalloc for the allocation as there are
	 * parts of the mapblocksize code that expect certain fields to be
	 * zero unless explicitly set to a required value.
	 */
	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
	bsp->mbs_oprivate = xp->xb_private;
	xp->xb_private = bsp;

	/*
	 * This treats the data on the disk (target) as an array of bytes.
	 * first_byte is the byte offset, from the beginning of the device,
	 * to the location of the request. This is converted from a
	 * un->un_sys_blocksize block address to a byte offset, and then back
	 * to a block address based upon a un->un_tgt_blocksize block size.
	 *
	 * xp->xb_blkno should be absolute upon entry into this function,
	 * but, but it is based upon partitions that use the "system"
	 * block size. It must be adjusted to reflect the block size of
	 * the target.
	 *
	 * Note that end_block is actually the block that follows the last
	 * block of the request, but that's what is needed for the computation.
	 */
	first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
	if (un->un_f_enable_rmw) {
		/* RMW mode: round to physical-block boundaries instead. */
		start_block = xp->xb_blkno =
		    (first_byte / un->un_phy_blocksize) *
		    (un->un_phy_blocksize / DEV_BSIZE);
		end_block = ((first_byte + bp->b_bcount +
		    un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
		    (un->un_phy_blocksize / DEV_BSIZE);
	} else {
		start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
		end_block = (first_byte + bp->b_bcount +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
	}

	/* request_bytes is rounded up to a multiple of the target block size */
	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;

	/*
	 * See if the starting address of the request and the request
	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
	 * then we do not need to allocate a shadow buf to handle the request.
	 */
	if (un->un_f_enable_rmw) {
		if (((first_byte % un->un_phy_blocksize) == 0) &&
		    ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
			is_aligned = TRUE;
		}
	} else {
		if (((first_byte % un->un_tgt_blocksize) == 0) &&
		    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
			is_aligned = TRUE;
		}
	}

	if ((bp->b_flags & B_READ) == 0) {
		/*
		 * Lock the range for a write operation. An aligned request is
		 * considered a simple write; otherwise the request must be a
		 * read-modify-write.
		 */
		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
	}

	/*
	 * Alloc a shadow buf if the request is not aligned. Also, this is
	 * where the READ command is generated for a read-modify-write. (The
	 * write phase is deferred until after the read completes.)
	 */
	if (is_aligned == FALSE) {

		struct sd_mapblocksize_info	*shadow_bsp;
		struct sd_xbuf	*shadow_xp;
		struct buf	*shadow_bp;

		/*
		 * Allocate the shadow buf and it associated xbuf. Note that
		 * after this call the xb_blkno value in both the original
		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
		 * same: absolute relative to the start of the device, and
		 * adjusted for the target block size. The b_blkno in the
		 * shadow buf will also be set to this value. We should never
		 * change b_blkno in the original bp however.
		 *
		 * Note also that the shadow buf will always need to be a
		 * READ command, regardless of whether the incoming command
		 * is a READ or a WRITE.
		 */
		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
		    xp->xb_blkno,
		    (int (*)(struct buf *)) sd_mapblocksize_iodone);

		shadow_xp = SD_GET_XBUF(shadow_bp);

		/*
		 * Allocate the layer-private data for the shadow buf.
		 * (No need to preserve xb_private in the shadow xbuf.)
		 */
		shadow_xp->xb_private = shadow_bsp =
		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);

		/*
		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
		 * to figure out where the start of the user data is (based upon
		 * the system block size) in the data returned by the READ
		 * command (which will be based upon the target blocksize). Note
		 * that this is only really used if the request is unaligned.
		 */
		if (un->un_f_enable_rmw) {
			bsp->mbs_copy_offset = (ssize_t)(first_byte -
			    ((offset_t)xp->xb_blkno * un->un_sys_blocksize));
			ASSERT((bsp->mbs_copy_offset >= 0) &&
			    (bsp->mbs_copy_offset < un->un_phy_blocksize));
		} else {
			bsp->mbs_copy_offset = (ssize_t)(first_byte -
			    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
			ASSERT((bsp->mbs_copy_offset >= 0) &&
			    (bsp->mbs_copy_offset < un->un_tgt_blocksize));
		}

		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;

		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;

		/* Transfer the wmap (if any) to the shadow buf */
		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
		bsp->mbs_wmp = NULL;

		/*
		 * The shadow buf goes on from here in place of the
		 * original buf.
		 */
		shadow_bsp->mbs_orig_bp = bp;
		bp = shadow_bp;
	}

	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
	    request_bytes);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);

done:
	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
}
13098 13096
13099 13097
/*
 * Function: sd_mapblocksize_iodone
 *
 * Description: Completion side processing for block-size mapping.
 *		Unwinds the shadow buf created by sd_mapblocksize_iostart()
 *		(if any), copies data between shadow and original bufs as
 *		needed, and for the READ phase of a read-modify-write
 *		dispatches the deferred WRITE via the sd_wmr_tq taskq.
 *
 * Arguments: index - index into the layering chain, passed to
 *			SD_NEXT_IODONE()
 *		un - softstate for the target device/LUN
 *		bp - buf being completed; may be a shadow buf whose
 *			b_iodone points back at this function
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*orig_xp;	/* sd_xbuf for the original buf */
	struct buf	*orig_bp;	/* ptr to the original buf */
	offset_t	shadow_end;
	offset_t	request_end;
	offset_t	shadow_start;
	ssize_t	copy_offset;
	size_t	copy_length;
	size_t	shortfall;
	uint_t	is_write;	/* TRUE if this bp is a WRITE */
	uint_t	has_wmap;	/* TRUE is this bp has a wmap */

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);

	/*
	 * There is no shadow buf or layer-private data if the target is
	 * using un->un_sys_blocksize as its block size or if bcount == 0.
	 */
	if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
	    (bp->b_bcount == 0)) {
		goto exit;
	}

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* Retrieve the pointer to the layer-private data area from the xbuf. */
	bsp = xp->xb_private;

	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;

	if (is_write) {
		/*
		 * For a WRITE request we must free up the block range that
		 * we have locked up.  This holds regardless of whether this is
		 * an aligned write request or a read-modify-write request.
		 */
		sd_range_unlock(un, bsp->mbs_wmp);
		bsp->mbs_wmp = NULL;
	}

	/*
	 * A shadow buf is identified by its b_iodone pointing at this
	 * function (set by sd_shadow_buf_alloc() in the iostart side).
	 */
	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
		/*
		 * An aligned read or write command will have no shadow buf;
		 * there is not much else to do with it.
		 */
		goto done;
	}

	orig_bp = bsp->mbs_orig_bp;
	ASSERT(orig_bp != NULL);
	orig_xp = SD_GET_XBUF(orig_bp);
	ASSERT(orig_xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!is_write && has_wmap) {
		/*
		 * A READ with a wmap means this is the READ phase of a
		 * read-modify-write. If an error occurred on the READ then
		 * we do not proceed with the WRITE phase or copy any data.
		 * Just release the write maps and return with an error.
		 */
		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
			orig_bp->b_resid = orig_bp->b_bcount;
			bioerror(orig_bp, bp->b_error);
			sd_range_unlock(un, bsp->mbs_wmp);
			goto freebuf_done;
		}
	}

	/*
	 * Here is where we set up to copy the data from the shadow buf
	 * into the space associated with the original buf.
	 *
	 * To deal with the conversion between block sizes, these
	 * computations treat the data as an array of bytes, with the
	 * first byte (byte 0) corresponding to the first byte in the
	 * first block on the disk.
	 */

	/*
	 * shadow_start and shadow_len indicate the location and size of
	 * the data returned with the shadow IO request.
	 */
	if (un->un_f_enable_rmw) {
		shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
	} else {
		shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	}
	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;

	/*
	 * copy_offset gives the offset (in bytes) from the start of the first
	 * block of the READ request to the beginning of the data. We retrieve
	 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved
	 * there by sd_mapblockize_iostart(). copy_length gives the amount of
	 * data to be copied (in bytes).
	 */
	copy_offset = bsp->mbs_copy_offset;
	if (un->un_f_enable_rmw) {
		ASSERT((copy_offset >= 0) &&
		    (copy_offset < un->un_phy_blocksize));
	} else {
		ASSERT((copy_offset >= 0) &&
		    (copy_offset < un->un_tgt_blocksize));
	}

	copy_length = orig_bp->b_bcount;
	request_end = shadow_start + copy_offset + orig_bp->b_bcount;

	/*
	 * Set up the resid and error fields of orig_bp as appropriate.
	 */
	if (shadow_end >= request_end) {
		/* We got all the requested data; set resid to zero */
		orig_bp->b_resid = 0;
	} else {
		/*
		 * We failed to get enough data to fully satisfy the original
		 * request. Just copy back whatever data we got and set
		 * up the residual and error code as required.
		 *
		 * 'shortfall' is the amount by which the data received with the
		 * shadow buf has "fallen short" of the requested amount.
		 */
		shortfall = (size_t)(request_end - shadow_end);

		if (shortfall > orig_bp->b_bcount) {
			/*
			 * We did not get enough data to even partially
			 * fulfill the original request.  The residual is
			 * equal to the amount requested.
			 */
			orig_bp->b_resid = orig_bp->b_bcount;
		} else {
			/*
			 * We did not get all the data that we requested
			 * from the device, but we will try to return what
			 * portion we did get.
			 */
			orig_bp->b_resid = shortfall;
		}
		ASSERT(copy_length >= orig_bp->b_resid);
		copy_length -= orig_bp->b_resid;
	}

	/* Propagate the error code from the shadow buf to the original buf */
	bioerror(orig_bp, bp->b_error);

	if (is_write) {
		goto freebuf_done;	/* No data copying for a WRITE */
	}

	if (has_wmap) {
		/*
		 * This is a READ command from the READ phase of a
		 * read-modify-write request. We have to copy the data given
		 * by the user OVER the data returned by the READ command,
		 * then convert the command from a READ to a WRITE and send
		 * it back to the target.
		 */
		bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
		    copy_length);

		bp->b_flags &= ~((int)B_READ);	/* Convert to a WRITE */

		/*
		 * Dispatch the WRITE command to the taskq thread, which
		 * will in turn send the command to the target. When the
		 * WRITE command completes, we (sd_mapblocksize_iodone())
		 * will get called again as part of the iodone chain
		 * processing for it. Note that we will still be dealing
		 * with the shadow buf at that point.
		 */
		if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
		    KM_NOSLEEP) != 0) {
			/*
			 * Dispatch was successful so we are done. Return
			 * without going any higher up the iodone chain. Do
			 * not free up any layer-private data until after the
			 * WRITE completes.
			 */
			return;
		}

		/*
		 * Dispatch of the WRITE command failed; set up the error
		 * condition and send this IO back up the iodone chain.
		 */
		bioerror(orig_bp, EIO);
		orig_bp->b_resid = orig_bp->b_bcount;

	} else {
		/*
		 * This is a regular READ request (ie, not a RMW). Copy the
		 * data from the shadow buf into the original buf. The
		 * copy_offset compensates for any "misalignment" between the
		 * shadow buf (with its un->un_tgt_blocksize blocks) and the
		 * original buf (with its un->un_sys_blocksize blocks).
		 */
		bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
		    copy_length);
	}

freebuf_done:

	/*
	 * At this point we still have both the shadow buf AND the original
	 * buf to deal with, as well as the layer-private data area in each.
	 * Local variables are as follows:
	 *
	 *	bp -- points to shadow buf
	 *	xp -- points to xbuf of shadow buf
	 *	bsp -- points to layer-private data area of shadow buf
	 *	orig_bp -- points to original buf
	 *
	 * First free the shadow buf and its associated xbuf, then free the
	 * layer-private data area from the shadow buf. There is no need to
	 * restore xb_private in the shadow xbuf.
	 */
	sd_shadow_buf_free(bp);
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

	/*
	 * Now update the local variables to point to the original buf, xbuf,
	 * and layer-private area.
	 */
	bp = orig_bp;
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp == orig_xp);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

done:
	/*
	 * Restore xb_private to whatever it was set to by the next higher
	 * layer in the chain, then free the layer-private data area.
	 */
	xp->xb_private = bsp->mbs_oprivate;
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

exit:
	SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
	    "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);

	SD_NEXT_IODONE(index, un, bp);
}
13365 13363
13366 13364
13367 13365 /*
13368 13366 * Function: sd_checksum_iostart
13369 13367 *
13370 13368 * Description: A stub function for a layer that's currently not used.
13371 13369 * For now just a placeholder.
13372 13370 *
13373 13371 * Context: Kernel thread context
13374 13372 */
13375 13373
13376 13374 static void
13377 13375 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
13378 13376 {
13379 13377 ASSERT(un != NULL);
13380 13378 ASSERT(bp != NULL);
13381 13379 ASSERT(!mutex_owned(SD_MUTEX(un)));
13382 13380 SD_NEXT_IOSTART(index, un, bp);
13383 13381 }
13384 13382
13385 13383
13386 13384 /*
13387 13385 * Function: sd_checksum_iodone
13388 13386 *
13389 13387 * Description: A stub function for a layer that's currently not used.
13390 13388 * For now just a placeholder.
13391 13389 *
13392 13390 * Context: May be called under interrupt context
13393 13391 */
13394 13392
13395 13393 static void
13396 13394 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
13397 13395 {
13398 13396 ASSERT(un != NULL);
13399 13397 ASSERT(bp != NULL);
13400 13398 ASSERT(!mutex_owned(SD_MUTEX(un)));
13401 13399 SD_NEXT_IODONE(index, un, bp);
13402 13400 }
13403 13401
13404 13402
13405 13403 /*
13406 13404 * Function: sd_checksum_uscsi_iostart
13407 13405 *
13408 13406 * Description: A stub function for a layer that's currently not used.
13409 13407 * For now just a placeholder.
13410 13408 *
13411 13409 * Context: Kernel thread context
13412 13410 */
13413 13411
13414 13412 static void
13415 13413 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
13416 13414 {
13417 13415 ASSERT(un != NULL);
13418 13416 ASSERT(bp != NULL);
13419 13417 ASSERT(!mutex_owned(SD_MUTEX(un)));
13420 13418 SD_NEXT_IOSTART(index, un, bp);
13421 13419 }
13422 13420
13423 13421
13424 13422 /*
13425 13423 * Function: sd_checksum_uscsi_iodone
13426 13424 *
13427 13425 * Description: A stub function for a layer that's currently not used.
13428 13426 * For now just a placeholder.
13429 13427 *
13430 13428 * Context: May be called under interrupt context
13431 13429 */
13432 13430
13433 13431 static void
13434 13432 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
13435 13433 {
13436 13434 ASSERT(un != NULL);
13437 13435 ASSERT(bp != NULL);
13438 13436 ASSERT(!mutex_owned(SD_MUTEX(un)));
13439 13437 SD_NEXT_IODONE(index, un, bp);
13440 13438 }
13441 13439
13442 13440
13443 13441 /*
13444 13442 * Function: sd_pm_iostart
13445 13443 *
13446 13444 * Description: iostart-side routine for Power mangement.
13447 13445 *
13448 13446 * Context: Kernel thread context
13449 13447 */
13450 13448
13451 13449 static void
13452 13450 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
13453 13451 {
13454 13452 ASSERT(un != NULL);
13455 13453 ASSERT(bp != NULL);
13456 13454 ASSERT(!mutex_owned(SD_MUTEX(un)));
13457 13455 ASSERT(!mutex_owned(&un->un_pm_mutex));
13458 13456
13459 13457 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
13460 13458
13461 13459 if (sd_pm_entry(un) != DDI_SUCCESS) {
13462 13460 /*
13463 13461 * Set up to return the failed buf back up the 'iodone'
13464 13462 * side of the calling chain.
13465 13463 */
13466 13464 bioerror(bp, EIO);
13467 13465 bp->b_resid = bp->b_bcount;
13468 13466
13469 13467 SD_BEGIN_IODONE(index, un, bp);
13470 13468
13471 13469 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13472 13470 return;
13473 13471 }
13474 13472
13475 13473 SD_NEXT_IOSTART(index, un, bp);
13476 13474
13477 13475 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13478 13476 }
13479 13477
13480 13478
13481 13479 /*
13482 13480 * Function: sd_pm_iodone
13483 13481 *
13484 13482 * Description: iodone-side routine for power mangement.
13485 13483 *
13486 13484 * Context: may be called from interrupt context
13487 13485 */
13488 13486
13489 13487 static void
13490 13488 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13491 13489 {
13492 13490 ASSERT(un != NULL);
13493 13491 ASSERT(bp != NULL);
13494 13492 ASSERT(!mutex_owned(&un->un_pm_mutex));
13495 13493
13496 13494 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13497 13495
13498 13496 /*
13499 13497 * After attach the following flag is only read, so don't
13500 13498 * take the penalty of acquiring a mutex for it.
13501 13499 */
13502 13500 if (un->un_f_pm_is_enabled == TRUE) {
13503 13501 sd_pm_exit(un);
13504 13502 }
13505 13503
13506 13504 SD_NEXT_IODONE(index, un, bp);
13507 13505
13508 13506 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13509 13507 }
13510 13508
13511 13509
/*
 * Function: sd_core_iostart
 *
 * Description: Primary driver function for enqueuing buf(9S) structs from
 *		the system and initiating IO to the target device
 *
 * Arguments: index - index into the layering chain (unused here; see
 *			ARGSUSED)
 *		un - softstate for the target device/LUN
 *		bp - buf to be enqueued or transported
 *
 * Context: Kernel thread context. Can sleep.
 *
 * Assumptions: - The given xp->xb_blkno is absolute
 *		(ie, relative to the start of the device).
 *		- The IO is to be done using the native blocksize of
 *		the device, as specified in un->un_tgt_blocksize.
 */
/* ARGSUSED */
static void
sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* un_failfast_state and the wait queue are protected by SD_MUTEX. */
	mutex_enter(SD_MUTEX(un));

	/*
	 * If we are currently in the failfast state, fail any new IO
	 * that has B_FAILFAST set, then return.
	 */
	if ((bp->b_flags & B_FAILFAST) &&
	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
		mutex_exit(SD_MUTEX(un));
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	if (SD_IS_DIRECT_PRIORITY(xp)) {
		/*
		 * Priority command -- transport it immediately.
		 *
		 * Note: We may want to assert that USCSI_DIAGNOSE is set,
		 * because all direct priority commands should be associated
		 * with error recovery actions which we don't want to retry.
		 */
		sd_start_cmds(un, bp);
	} else {
		/*
		 * Normal command -- add it to the wait queue, then start
		 * transporting commands from the wait queue.
		 */
		sd_add_buf_to_waitq(un, bp);
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		sd_start_cmds(un, NULL);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
}
13579 13577
13580 13578
/*
 * Function: sd_init_cdb_limits
 *
 * Description: This is to handle scsi_pkt initialization differences
 *		between the driver platforms.
 *
 *		Legacy behaviors:
 *
 *		If the block number or the sector count exceeds the
 *		capabilities of a Group 0 command, shift over to a
 *		Group 1 command. We don't blindly use Group 1
 *		commands because a) some drives (CDC Wren IVs) get a
 *		bit confused, and b) there is probably a fair amount
 *		of speed difference for a target to receive and decode
 *		a 10 byte command instead of a 6 byte command.
 *
 *		The xfer time difference of 6 vs 10 byte CDBs is
 *		still significant so this code is still worthwhile.
 *		10 byte CDBs are very inefficient with the fas HBA driver
 *		and older disks. Each CDB byte took 1 usec with some
 *		popular disks.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Must be called at attach time
 */

static void
sd_init_cdb_limits(struct sd_lun *un)
{
	int hba_cdb_limit;

	/*
	 * Use CDB_GROUP1 commands for most devices except for
	 * parallel SCSI fixed drives in which case we get better
	 * performance using CDB_GROUP0 commands (where applicable).
	 */
	un->un_mincdb = SD_CDB_GROUP1;
#if !defined(__fibre)
	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
	    !un->un_f_has_removable_media) {
		un->un_mincdb = SD_CDB_GROUP0;
	}
#endif

	/*
	 * Try to read the max-cdb-length supported by HBA.
	 */
	un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
	if (0 >= un->un_max_hba_cdb) {
		/*
		 * HBA did not report a usable limit; assume the largest
		 * (Group 4, 16-byte) CDBs are acceptable.
		 */
		un->un_max_hba_cdb = CDB_GROUP4;
		hba_cdb_limit = SD_CDB_GROUP4;
	} else if (0 < un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP1) {
		hba_cdb_limit = SD_CDB_GROUP0;
	} else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP5) {
		hba_cdb_limit = SD_CDB_GROUP1;
	} else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP4) {
		/*
		 * NOTE(review): these range checks rely on the byte-size
		 * ordering CDB_GROUP0 < CDB_GROUP1 < CDB_GROUP5 <
		 * CDB_GROUP4 (6 < 10 < 12 < 16 byte CDBs); the group
		 * numbers themselves are not in size order.
		 */
		hba_cdb_limit = SD_CDB_GROUP5;
	} else {
		hba_cdb_limit = SD_CDB_GROUP4;
	}

	/*
	 * Use CDB_GROUP5 commands for removable devices.  Use CDB_GROUP4
	 * commands for fixed disks unless we are building for a 32 bit
	 * kernel.
	 */
#ifdef _LP64
	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
	    min(hba_cdb_limit, SD_CDB_GROUP4);
#else
	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
	    min(hba_cdb_limit, SD_CDB_GROUP1);
#endif

	/*
	 * Room for the full auto-request-sense status if ARQ is enabled,
	 * otherwise just the one status byte.
	 */
	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
	    ? sizeof (struct scsi_arq_status) : 1);
	/*
	 * Non-CD devices take their command timeout from the global
	 * sd_io_time tunable; for CDs un_cmd_timeout is presumably
	 * established elsewhere -- TODO(review): confirm.
	 */
	if (!ISCD(un))
		un->un_cmd_timeout = (ushort_t)sd_io_time;
	un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
}
13663 13661
13664 13662
/*
 * Function: sd_initpkt_for_buf
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given buf struct.
 *
 *		Assumes the xb_blkno in the request is absolute (ie,
 *		relative to the start of the device (NOT partition!).
 *		Also assumes that the request is using the native block
 *		size of the device (as returned by the READ CAPACITY
 *		command).
 *
 * Arguments: bp    - the buf(9S) describing the transfer
 *		pktpp - output: receives the allocated scsi_pkt on success
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback. This function may not block or
 *		call routines that block
 *
 *		Called with SD_MUTEX held; the mutex is dropped around the
 *		packet allocation and re-acquired before every return.
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp = NULL;
	struct sd_lun *un;
	size_t blockcount;
	daddr_t startblock;
	int rval;
	int cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

	/* Drop the mutex across the (possibly slow) packet setup. */
	mutex_exit(SD_MUTEX(un));

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resouces
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

	/* Combine the per-unit packet flags with the per-request ones. */
	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this transfer.
		 * set it up here.
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {

			/*
			 * Save the CDB length and pkt_resid for the
			 * next xfer
			 */
			xp->xb_dma_resid = pktp->pkt_resid;

			/* rezero resid */
			pktp->pkt_resid = 0;

		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time = un->un_cmd_timeout;
		pktp->pkt_comp = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_SUCCESS);

	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
	 * from sd_setup_rw_pkt.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detatch while the state is RWAIT.
		 */
		mutex_enter(SD_MUTEX(un));
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);

		/* B_ERROR set here indicates a DMA resource failure. */
		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	} else {
		/*
		 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
		 *
		 * This should never happen. Maybe someone messed with the
		 * kernel's minphys?
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Request rejected: too large for CDB: "
		    "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);

	}
}
13825 13823
13826 13824
13827 13825 /*
13828 13826 * Function: sd_destroypkt_for_buf
13829 13827 *
13830 13828 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13831 13829 *
13832 13830 * Context: Kernel thread or interrupt context
13833 13831 */
13834 13832
13835 13833 static void
13836 13834 sd_destroypkt_for_buf(struct buf *bp)
13837 13835 {
13838 13836 ASSERT(bp != NULL);
13839 13837 ASSERT(SD_GET_UN(bp) != NULL);
13840 13838
13841 13839 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13842 13840 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13843 13841
13844 13842 ASSERT(SD_GET_PKTP(bp) != NULL);
13845 13843 scsi_destroy_pkt(SD_GET_PKTP(bp));
13846 13844
13847 13845 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13848 13846 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13849 13847 }
13850 13848
/*
 * Function: sd_setup_rw_pkt
 *
 * Description: Determines appropriate CDB group for the requested LBA
 *		and transfer length, calls scsi_init_pkt, and builds
 *		the CDB.  Do not use for partial DMA transfers except
 *		for the initial transfer since the CDB size must
 *		remain constant.
 *
 * Arguments: un	   - driver soft state (unit) structure
 *		pktpp	   - in/out: existing pkt to reuse (or NULL); on
 *			     success receives the (possibly new) scsi_pkt
 *		bp	   - the buf(9S) for the transfer
 *		flags	   - flags passed through to scsi_init_pkt(9F)
 *		callback   - resource-callback function (e.g. sdrunout)
 *		callback_arg - argument for the callback
 *		lba	   - starting absolute block address
 *		blockcount - transfer length in target blocks
 *
 * Return Code: 0 on success,
 *		SD_PKT_ALLOC_FAILURE,
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt
 *		context as part of a sdrunout callback. This function may not
 *		block or call routines that block
 */


int
sd_setup_rw_pkt(struct sd_lun *un,
    struct scsi_pkt **pktpp, struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount)
{
	struct scsi_pkt *return_pktp;
	union scsi_cdb *cdbp;
	struct sd_cdbinfo *cp = NULL;
	int i;

	/*
	 * See which size CDB to use, based upon the request.
	 * Scan from the smallest permitted CDB group (un_mincdb)
	 * to the largest (un_maxcdb) and take the first that fits.
	 */
	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {

		/*
		 * Check lba and block count against sd_cdbtab limits.
		 * In the partial DMA case, we have to use the same size
		 * CDB for all the transfers.  Check lba + blockcount
		 * against the max LBA so we know that segment of the
		 * transfer can use the CDB we select.
		 */
		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {

			/*
			 * The command will fit into the CDB type
			 * specified by sd_cdbtab[i].
			 */
			cp = sd_cdbtab + i;

			/*
			 * Call scsi_init_pkt so we can fill in the
			 * CDB.
			 */
			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
			    bp, cp->sc_grpcode, un->un_status_len, 0,
			    flags, callback, callback_arg);

			if (return_pktp != NULL) {

				/*
				 * Return new value of pkt
				 */
				*pktpp = return_pktp;

				/*
				 * To be safe, zero the CDB insuring there is
				 * no leftover data from a previous command.
				 */
				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);

				/*
				 * Handle partial DMA mapping
				 */
				if (return_pktp->pkt_resid != 0) {

					/*
					 * Not going to xfer as many blocks as
					 * originally expected
					 */
					blockcount -=
					    SD_BYTES2TGTBLOCKS(un,
					    return_pktp->pkt_resid);
				}

				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;

				/*
				 * Set command byte based on the CDB
				 * type we matched.
				 */
				cdbp->scc_cmd = cp->sc_grpmask |
				    ((bp->b_flags & B_READ) ?
				    SCMD_READ : SCMD_WRITE);

				SD_FILL_SCSI1_LUN(un, return_pktp);

				/*
				 * Fill in LBA and length
				 */
				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
				    (cp->sc_grpcode == CDB_GROUP4) ||
				    (cp->sc_grpcode == CDB_GROUP0) ||
				    (cp->sc_grpcode == CDB_GROUP5));

				if (cp->sc_grpcode == CDB_GROUP1) {
					FORMG1ADDR(cdbp, lba);
					FORMG1COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP4) {
					FORMG4LONGADDR(cdbp, lba);
					FORMG4COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP0) {
					FORMG0ADDR(cdbp, lba);
					FORMG0COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP5) {
					FORMG5ADDR(cdbp, lba);
					FORMG5COUNT(cdbp, blockcount);
					return (0);
				}

				/*
				 * It should be impossible to not match one
				 * of the CDB types above, so we should never
				 * reach this point.  Set the CDB command byte
				 * to test-unit-ready to avoid writing
				 * to somewhere we don't intend.
				 */
				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
			} else {
				/*
				 * Couldn't get scsi_pkt
				 */
				return (SD_PKT_ALLOC_FAILURE);
			}
		}
	}

	/*
	 * None of the available CDB types were suitable.  This really
	 * should never happen: on a 64 bit system we support
	 * READ16/WRITE16 which will hold an entire 64 bit disk address
	 * and on a 32 bit system we will refuse to bind to a device
	 * larger than 2TB so addresses will never be larger than 32 bits.
	 */
	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
}
13998 13996
/*
 * Function: sd_setup_next_rw_pkt
 *
 * Description: Setup packet for partial DMA transfers, except for the
 *		initial transfer.  sd_setup_rw_pkt should be used for
 *		the initial transfer.
 *
 *		The CDB group of the existing packet is reused (the CDB
 *		size must stay constant across all segments of a partial
 *		DMA transfer); only the LBA and count fields are rebuilt.
 *
 * Arguments: un	   - driver soft state (unit) structure
 *		pktp	   - the scsi_pkt set up by sd_setup_rw_pkt
 *		bp	   - the buf(9S) for the transfer
 *		lba	   - starting block address of this segment
 *		blockcount - remaining length in target blocks
 *
 * Return Code: 0 on success,
 *		SD_PKT_ALLOC_FAILURE,
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from interrupt context.
 */

int
sd_setup_next_rw_pkt(struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp,
    diskaddr_t lba, uint32_t blockcount)
{
	uchar_t com;
	union scsi_cdb *cdbp;
	uchar_t cdb_group_id;

	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_cdbp != NULL);

	/* Preserve the existing command byte and derive its CDB group. */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
	com = cdbp->scc_cmd;
	cdb_group_id = CDB_GROUPID(com);

	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
	    (cdb_group_id == CDB_GROUPID_1) ||
	    (cdb_group_id == CDB_GROUPID_4) ||
	    (cdb_group_id == CDB_GROUPID_5));

	/*
	 * Move pkt to the next portion of the xfer.
	 * func is NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
	    NULL_FUNC, NULL) == pktp) {
		/* Success.  Handle partial DMA */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		/* Rebuild the CDB for the new LBA/count. */
		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/*
	 * Error setting up next portion of cmd transfer.
	 * Something is definitely very wrong and this
	 * should not happen.
	 */
	return (SD_PKT_ALLOC_FAILURE);
}
14074 14072
/*
 * Function: sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given uscsi_cmd struct.
 *
 * Arguments: bp    - the buf(9S) carrying the uscsi request; the
 *		      uscsi_cmd pointer is expected in xb_pktinfo
 *		pktpp - output: receives the allocated scsi_pkt on success
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback. This function may not block or
 *		call routines that block
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_lun *un;
	uint32_t flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt for the command.
	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
	 *	 during scsi_init_pkt time and will continue to use the
	 *	 same path as long as the same scsi_pkt is used without
	 *	 intervening scsi_dma_free(). Since uscsi command does
	 *	 not call scsi_dmafree() before retry failed command, it
	 *	 is necessary to make sure PKT_DMA_PARTIAL flag is NOT
	 *	 set such that scsi_vhci can use other available path for
	 *	 retry. Besides, ucsci command does not allow DMA breakup,
	 *	 so there is no need to set PKT_DMA_PARTIAL flag.
	 *
	 * A request-sense buffer larger than SENSE_LENGTH requires the
	 * extended (PKT_XARQ) status area; size it accordingly.
	 */
	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
		    - sizeof (struct scsi_extended_sense)), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
		    sdrunout, (caddr_t)un);
	} else {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    sizeof (struct scsi_arq_status), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
		    sdrunout, (caddr_t)un);
	}

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detatch while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		/* B_ERROR set here indicates a DMA resource failure. */
		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi (7I) man page
	 * for listing of the supported flags.
	 */
	if (uscmd->uscsi_flags & USCSI_SILENT) {
		flags |= FLAG_SILENT;
	}

	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
		flags |= FLAG_DIAGNOSE;
	}

	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
		flags |= FLAG_ISOLATE;
	}

	/* Wide/sync renegotiation only applies to parallel SCSI. */
	if (un->un_f_is_fibre == FALSE) {
		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
		}
	}

	/*
	 * Set the pkt flags here so we save time later.
	 * Note: These flags are NOT in the uscsi man page!!!
	 */
	if (uscmd->uscsi_flags & USCSI_HEAD) {
		flags |= FLAG_HEAD;
	}

	if (uscmd->uscsi_flags & USCSI_NOINTR) {
		flags |= FLAG_NOINTR;
	}

	/*
	 * For tagged queueing, things get a bit complicated.
	 * Check first for head of queue and last for ordered queue.
	 * If neither head nor order, use the default driver tag flags.
	 */
	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
		if (uscmd->uscsi_flags & USCSI_HTAG) {
			flags |= FLAG_HTAG;
		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
			flags |= FLAG_OTAG;
		} else {
			flags |= un->un_tagflags & FLAG_TAGMASK;
		}
	}

	/* NODISCON overrides any tag flags selected above. */
	if (uscmd->uscsi_flags & USCSI_NODISCON) {
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	}

	pktp->pkt_flags = flags;

	/* Transfer uscsi information to scsi_pkt */
	(void) scsi_uscsi_pktinit(uscmd, pktp);

	/* Copy the caller's CDB into the pkt... */
	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);

	/* A zero uscsi_timeout means "use the driver default". */
	if (uscmd->uscsi_timeout == 0) {
		pktp->pkt_time = un->un_uscsi_timeout;
	} else {
		pktp->pkt_time = uscmd->uscsi_timeout;
	}

	/* need it later to identify USCSI request in sdintr */
	xp->xb_pkt_flags |= SD_XB_USCSICMD;

	xp->xb_sense_resid = uscmd->uscsi_rqresid;

	pktp->pkt_private = bp;
	pktp->pkt_comp = sdintr;
	*pktpp = pktp;

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);

	return (SD_PKT_ALLOC_SUCCESS);
}
14260 14258
14261 14259
/*
 * Function: sd_destroypkt_for_uscsi
 *
 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
 *		IOs.. Also saves relevant info into the associated uscsi_cmd
 *		struct: completion status, residual, sense data (if
 *		requested), and the SCSI FMA fields in sd_uscsi_info.
 *
 * Context: May be called under interrupt context
 */

static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_lun *un;
	struct sd_uscsi_info *suip;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid = bp->b_resid;

	/* Transfer scsi_pkt information to uscsi */
	(void) scsi_uscsi_pktfini(pktp, uscmd);

	/*
	 * If enabled, copy any saved sense data into the area specified
	 * by the uscsi command.
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 *
		 * NOTE(review): the first branch copies MAX_SENSE_LENGTH
		 * bytes whenever rqlen > SENSE_LENGTH, which presumably
		 * requires rqbuf to be at least MAX_SENSE_LENGTH bytes in
		 * that case -- confirm against sd_send_scsi_cmd().
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid = xp->xb_sense_resid;
		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    MAX_SENSE_LENGTH);
		} else {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    SENSE_LENGTH);
		}
	}
	/*
	 * The following assignments are for SCSI FMA.
	 */
	ASSERT(xp->xb_private != NULL);
	suip = (struct sd_uscsi_info *)xp->xb_private;
	suip->ui_pkt_reason = pktp->pkt_reason;
	suip->ui_pkt_state = pktp->pkt_state;
	suip->ui_pkt_statistics = pktp->pkt_statistics;
	suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}
14341 14339
14342 14340
14343 14341 /*
14344 14342 * Function: sd_bioclone_alloc
14345 14343 *
14346 14344 * Description: Allocate a buf(9S) and init it as per the given buf
14347 14345 * and the various arguments. The associated sd_xbuf
14348 14346 * struct is (nearly) duplicated. The struct buf *bp
14349 14347 * argument is saved in new_xp->xb_private.
14350 14348 *
14351 14349 * Arguments: bp - ptr the the buf(9S) to be "shadowed"
14352 14350 * datalen - size of data area for the shadow bp
14353 14351 * blkno - starting LBA
14354 14352 * func - function pointer for b_iodone in the shadow buf. (May
14355 14353 * be NULL if none.)
14356 14354 *
14357 14355 * Return Code: Pointer to allocates buf(9S) struct
14358 14356 *
14359 14357 * Context: Can sleep.
14360 14358 */
14361 14359
14362 14360 static struct buf *
14363 14361 sd_bioclone_alloc(struct buf *bp, size_t datalen,
14364 14362 daddr_t blkno, int (*func)(struct buf *))
14365 14363 {
14366 14364 struct sd_lun *un;
14367 14365 struct sd_xbuf *xp;
14368 14366 struct sd_xbuf *new_xp;
14369 14367 struct buf *new_bp;
14370 14368
14371 14369 ASSERT(bp != NULL);
14372 14370 xp = SD_GET_XBUF(bp);
14373 14371 ASSERT(xp != NULL);
14374 14372 un = SD_GET_UN(bp);
14375 14373 ASSERT(un != NULL);
14376 14374 ASSERT(!mutex_owned(SD_MUTEX(un)));
14377 14375
14378 14376 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
14379 14377 NULL, KM_SLEEP);
14380 14378
14381 14379 new_bp->b_lblkno = blkno;
14382 14380
14383 14381 /*
14384 14382 * Allocate an xbuf for the shadow bp and copy the contents of the
14385 14383 * original xbuf into it.
14386 14384 */
14387 14385 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14388 14386 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14389 14387
14390 14388 /*
14391 14389 * The given bp is automatically saved in the xb_private member
14392 14390 * of the new xbuf. Callers are allowed to depend on this.
14393 14391 */
14394 14392 new_xp->xb_private = bp;
14395 14393
14396 14394 new_bp->b_private = new_xp;
14397 14395
14398 14396 return (new_bp);
14399 14397 }
14400 14398
/*
 * Function: sd_shadow_buf_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments.  The associated sd_xbuf
 *		struct is (nearly) duplicated.  The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments: bp	- ptr the the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		bflags	- B_READ or B_WRITE (pseudo flag)
 *		blkno	- starting LBA
 *		func	- function pointer for b_iodone in the shadow buf. (May
 *			  be NULL if none.)
 *
 * Return Code: Pointer to allocates buf(9S) struct
 *
 * Context: Can sleep.
 */

static struct buf *
sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun *un;
	struct sd_xbuf *xp;
	struct sd_xbuf *new_xp;
	struct buf *new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Make sure the original buf's data is kernel-addressable so it
	 * can later be copied to/from the shadow buf's data area.
	 */
	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
		bp_mapin(bp);
	}

	/* Only the read/write direction is honored from bflags. */
	bflags &= (B_READ | B_WRITE);
#if defined(__i386) || defined(__amd64)
	/*
	 * On x86, build the shadow buf by hand with a zeroed kernel
	 * buffer; on other platforms use a DMA-consistent buffer from
	 * scsi_alloc_consistent_buf(9F).
	 */
	new_bp = getrbuf(KM_SLEEP);
	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
	new_bp->b_bcount = datalen;
	new_bp->b_flags = bflags |
	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
#else
	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
	    datalen, bflags, SLEEP_FUNC, NULL);
#endif
	new_bp->av_forw = NULL;
	new_bp->av_back = NULL;
	new_bp->b_dev = bp->b_dev;
	new_bp->b_blkno = blkno;
	new_bp->b_iodone = func;
	new_bp->b_edev = bp->b_edev;
	new_bp->b_resid = 0;

	/* We need to preserve the B_FAILFAST flag */
	if (bp->b_flags & B_FAILFAST) {
		new_bp->b_flags |= B_FAILFAST;
	}

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/* Need later to copy data between the shadow buf & original buf! */
	new_xp->xb_pkt_flags |= PKT_CONSISTENT;

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}
14485 14483
14486 14484 /*
14487 14485 * Function: sd_bioclone_free
14488 14486 *
14489 14487 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14490 14488 * in the larger than partition operation.
14491 14489 *
14492 14490 * Context: May be called under interrupt context
14493 14491 */
14494 14492
14495 14493 static void
14496 14494 sd_bioclone_free(struct buf *bp)
14497 14495 {
14498 14496 struct sd_xbuf *xp;
14499 14497
14500 14498 ASSERT(bp != NULL);
14501 14499 xp = SD_GET_XBUF(bp);
14502 14500 ASSERT(xp != NULL);
14503 14501
14504 14502 /*
14505 14503 * Call bp_mapout() before freeing the buf, in case a lower
14506 14504 * layer or HBA had done a bp_mapin(). we must do this here
14507 14505 * as we are the "originator" of the shadow buf.
14508 14506 */
14509 14507 bp_mapout(bp);
14510 14508
14511 14509 /*
14512 14510 * Null out b_iodone before freeing the bp, to ensure that the driver
14513 14511 * never gets confused by a stale value in this field. (Just a little
14514 14512 * extra defensiveness here.)
14515 14513 */
14516 14514 bp->b_iodone = NULL;
14517 14515
14518 14516 freerbuf(bp);
14519 14517
14520 14518 kmem_free(xp, sizeof (struct sd_xbuf));
14521 14519 }
14522 14520
14523 14521 /*
14524 14522 * Function: sd_shadow_buf_free
14525 14523 *
14526 14524 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14527 14525 *
14528 14526 * Context: May be called under interrupt context
14529 14527 */
14530 14528
14531 14529 static void
14532 14530 sd_shadow_buf_free(struct buf *bp)
14533 14531 {
14534 14532 struct sd_xbuf *xp;
14535 14533
14536 14534 ASSERT(bp != NULL);
14537 14535 xp = SD_GET_XBUF(bp);
14538 14536 ASSERT(xp != NULL);
14539 14537
14540 14538 #if defined(__sparc)
14541 14539 /*
14542 14540 * Call bp_mapout() before freeing the buf, in case a lower
14543 14541 * layer or HBA had done a bp_mapin(). we must do this here
14544 14542 * as we are the "originator" of the shadow buf.
14545 14543 */
14546 14544 bp_mapout(bp);
14547 14545 #endif
14548 14546
14549 14547 /*
14550 14548 * Null out b_iodone before freeing the bp, to ensure that the driver
14551 14549 * never gets confused by a stale value in this field. (Just a little
14552 14550 * extra defensiveness here.)
14553 14551 */
14554 14552 bp->b_iodone = NULL;
14555 14553
14556 14554 #if defined(__i386) || defined(__amd64)
14557 14555 kmem_free(bp->b_un.b_addr, bp->b_bcount);
14558 14556 freerbuf(bp);
14559 14557 #else
14560 14558 scsi_free_consistent_buf(bp);
14561 14559 #endif
14562 14560
14563 14561 kmem_free(xp, sizeof (struct sd_xbuf));
14564 14562 }
14565 14563
14566 14564
14567 14565 /*
14568 14566 * Function: sd_print_transport_rejected_message
14569 14567 *
14570 14568 * Description: This implements the ludicrously complex rules for printing
14571 14569 * a "transport rejected" message. This is to address the
14572 14570 * specific problem of having a flood of this error message
14573 14571 * produced when a failover occurs.
14574 14572 *
14575 14573 * Context: Any.
14576 14574 */
14577 14575
14578 14576 static void
14579 14577 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
14580 14578 int code)
14581 14579 {
14582 14580 ASSERT(un != NULL);
14583 14581 ASSERT(mutex_owned(SD_MUTEX(un)));
14584 14582 ASSERT(xp != NULL);
14585 14583
14586 14584 /*
14587 14585 * Print the "transport rejected" message under the following
14588 14586 * conditions:
14589 14587 *
14590 14588 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14591 14589 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14592 14590 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14593 14591 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
14594 14592 * scsi_transport(9F) (which indicates that the target might have
14595 14593 * gone off-line). This uses the un->un_tran_fatal_count
14596 14594 * count, which is incremented whenever a TRAN_FATAL_ERROR is
14597 14595 * received, and reset to zero whenver a TRAN_ACCEPT is returned
14598 14596 * from scsi_transport().
14599 14597 *
14600 14598 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
14601 14599 * the preceeding cases in order for the message to be printed.
14602 14600 */
14603 14601 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
14604 14602 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
14605 14603 if ((sd_level_mask & SD_LOGMASK_DIAG) ||
14606 14604 (code != TRAN_FATAL_ERROR) ||
14607 14605 (un->un_tran_fatal_count == 1)) {
14608 14606 switch (code) {
14609 14607 case TRAN_BADPKT:
14610 14608 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14611 14609 "transport rejected bad packet\n");
14612 14610 break;
14613 14611 case TRAN_FATAL_ERROR:
14614 14612 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14615 14613 "transport rejected fatal error\n");
14616 14614 break;
14617 14615 default:
14618 14616 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14619 14617 "transport rejected (%d)\n", code);
14620 14618 break;
14621 14619 }
14622 14620 }
14623 14621 }
14624 14622 }
14625 14623
14626 14624
/*
 * Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance. If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Arguments: un - ptr to the unit (soft state) struct for the target;
 *		the caller must hold SD_MUTEX(un).
 *		bp - ptr to the buf(9S) to be queued; its av_forw link is
 *		overwritten by this routine.
 *
 * Return Code: void
 *
 * Context: Does not sleep/block, therefore technically can be called
 *		from any context. However if sorting is enabled then the
 *		execution time is indeterminate, and may take long if
 *		the wait queue grows large.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled || un->un_f_enable_rmw) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort thru the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
	 * first queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 *
	 * If we lie after the first request, then we must locate the
	 * second request list and add ourselves to it.
	 */
	ap = un->un_waitq_headp;
	if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
		/* New request sorts before the head: it belongs on list 2. */
		while (ap->av_forw != NULL) {
			/*
			 * Look for an "inversion" in the (normally
			 * ascending) block numbers. This indicates
			 * the start of the second request list.
			 */
			if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
				/*
				 * Search the second request list for the
				 * first request at a larger block number.
				 * We go before that; however if there is
				 * no such request, we go at the end.
				 */
				do {
					if (SD_GET_BLKNO(bp) <
					    SD_GET_BLKNO(ap->av_forw)) {
						goto insert;
					}
					ap = ap->av_forw;
				} while (ap->av_forw != NULL);
				goto insert;	/* after last */
			}
			ap = ap->av_forw;
		}

		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}

	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while (ap->av_forw != NULL) {
		/*
		 * We want to go after the current request (1) if
		 * there is an inversion after it (i.e. it is the end
		 * of the first request list), or (2) if the next
		 * request is a larger block no. than our request.
		 */
		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
			goto insert;
		}
		ap = ap->av_forw;
	}

	/*
	 * Neither a second list nor a larger request, therefore
	 * we go at the end of the first list (which is the same
	 * as the end of the whole schebang).
	 */
insert:
	/* Splice bp in immediately after ap. */
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;

	/*
	 * If we inserted onto the tail end of the waitq, make sure the
	 * tail pointer is updated.
	 */
	if (ap == un->un_waitq_tailp) {
		un->un_waitq_tailp = bp;
	}
}
14762 14760
14763 14761
14764 14762 /*
14765 14763 * Function: sd_start_cmds
14766 14764 *
14767 14765 * Description: Remove and transport cmds from the driver queues.
14768 14766 *
14769 14767 * Arguments: un - pointer to the unit (soft state) struct for the target.
14770 14768 *
14771 14769 * immed_bp - ptr to a buf to be transported immediately. Only
14772 14770 * the immed_bp is transported; bufs on the waitq are not
14773 14771 * processed and the un_retry_bp is not checked. If immed_bp is
14774 14772 * NULL, then normal queue processing is performed.
14775 14773 *
14776 14774 * Context: May be called from kernel thread context, interrupt context,
14777 14775 * or runout callback context. This function may not block or
14778 14776 * call routines that block.
14779 14777 */
14780 14778
14781 14779 static void
14782 14780 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
14783 14781 {
14784 14782 struct sd_xbuf *xp;
14785 14783 struct buf *bp;
14786 14784 void (*statp)(kstat_io_t *);
14787 14785 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14788 14786 void (*saved_statp)(kstat_io_t *);
14789 14787 #endif
14790 14788 int rval;
14791 14789 struct sd_fm_internal *sfip = NULL;
14792 14790
14793 14791 ASSERT(un != NULL);
14794 14792 ASSERT(mutex_owned(SD_MUTEX(un)));
14795 14793 ASSERT(un->un_ncmds_in_transport >= 0);
14796 14794 ASSERT(un->un_throttle >= 0);
14797 14795
14798 14796 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
14799 14797
14800 14798 do {
14801 14799 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14802 14800 saved_statp = NULL;
14803 14801 #endif
14804 14802
14805 14803 /*
14806 14804 * If we are syncing or dumping, fail the command to
14807 14805 * avoid recursively calling back into scsi_transport().
14808 14806 * The dump I/O itself uses a separate code path so this
14809 14807 * only prevents non-dump I/O from being sent while dumping.
14810 14808 * File system sync takes place before dumping begins.
14811 14809 * During panic, filesystem I/O is allowed provided
14812 14810 * un_in_callback is <= 1. This is to prevent recursion
14813 14811 * such as sd_start_cmds -> scsi_transport -> sdintr ->
14814 14812 * sd_start_cmds and so on. See panic.c for more information
14815 14813 * about the states the system can be in during panic.
14816 14814 */
14817 14815 if ((un->un_state == SD_STATE_DUMPING) ||
14818 14816 (ddi_in_panic() && (un->un_in_callback > 1))) {
14819 14817 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14820 14818 "sd_start_cmds: panicking\n");
14821 14819 goto exit;
14822 14820 }
14823 14821
14824 14822 if ((bp = immed_bp) != NULL) {
14825 14823 /*
14826 14824 * We have a bp that must be transported immediately.
14827 14825 * It's OK to transport the immed_bp here without doing
14828 14826 * the throttle limit check because the immed_bp is
14829 14827 * always used in a retry/recovery case. This means
14830 14828 * that we know we are not at the throttle limit by
14831 14829 * virtue of the fact that to get here we must have
14832 14830 * already gotten a command back via sdintr(). This also
14833 14831 * relies on (1) the command on un_retry_bp preventing
14834 14832 * further commands from the waitq from being issued;
14835 14833 * and (2) the code in sd_retry_command checking the
14836 14834 * throttle limit before issuing a delayed or immediate
14837 14835 * retry. This holds even if the throttle limit is
14838 14836 * currently ratcheted down from its maximum value.
14839 14837 */
14840 14838 statp = kstat_runq_enter;
14841 14839 if (bp == un->un_retry_bp) {
14842 14840 ASSERT((un->un_retry_statp == NULL) ||
14843 14841 (un->un_retry_statp == kstat_waitq_enter) ||
14844 14842 (un->un_retry_statp ==
14845 14843 kstat_runq_back_to_waitq));
14846 14844 /*
14847 14845 * If the waitq kstat was incremented when
14848 14846 * sd_set_retry_bp() queued this bp for a retry,
14849 14847 * then we must set up statp so that the waitq
14850 14848 * count will get decremented correctly below.
14851 14849 * Also we must clear un->un_retry_statp to
14852 14850 * ensure that we do not act on a stale value
14853 14851 * in this field.
14854 14852 */
14855 14853 if ((un->un_retry_statp == kstat_waitq_enter) ||
14856 14854 (un->un_retry_statp ==
14857 14855 kstat_runq_back_to_waitq)) {
14858 14856 statp = kstat_waitq_to_runq;
14859 14857 }
14860 14858 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14861 14859 saved_statp = un->un_retry_statp;
14862 14860 #endif
14863 14861 un->un_retry_statp = NULL;
14864 14862
14865 14863 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14866 14864 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14867 14865 "un_throttle:%d un_ncmds_in_transport:%d\n",
14868 14866 un, un->un_retry_bp, un->un_throttle,
14869 14867 un->un_ncmds_in_transport);
14870 14868 } else {
14871 14869 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14872 14870 "processing priority bp:0x%p\n", bp);
14873 14871 }
14874 14872
14875 14873 } else if ((bp = un->un_waitq_headp) != NULL) {
14876 14874 /*
14877 14875 * A command on the waitq is ready to go, but do not
14878 14876 * send it if:
14879 14877 *
14880 14878 * (1) the throttle limit has been reached, or
14881 14879 * (2) a retry is pending, or
14882 14880 * (3) a START_STOP_UNIT callback pending, or
14883 14881 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14884 14882 * command is pending.
14885 14883 *
14886 14884 * For all of these conditions, IO processing will
14887 14885 * restart after the condition is cleared.
14888 14886 */
14889 14887 if (un->un_ncmds_in_transport >= un->un_throttle) {
14890 14888 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14891 14889 "sd_start_cmds: exiting, "
14892 14890 "throttle limit reached!\n");
14893 14891 goto exit;
14894 14892 }
14895 14893 if (un->un_retry_bp != NULL) {
14896 14894 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14897 14895 "sd_start_cmds: exiting, retry pending!\n");
14898 14896 goto exit;
14899 14897 }
14900 14898 if (un->un_startstop_timeid != NULL) {
14901 14899 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14902 14900 "sd_start_cmds: exiting, "
14903 14901 "START_STOP pending!\n");
14904 14902 goto exit;
14905 14903 }
14906 14904 if (un->un_direct_priority_timeid != NULL) {
14907 14905 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14908 14906 "sd_start_cmds: exiting, "
14909 14907 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14910 14908 goto exit;
14911 14909 }
14912 14910
14913 14911 /* Dequeue the command */
14914 14912 un->un_waitq_headp = bp->av_forw;
14915 14913 if (un->un_waitq_headp == NULL) {
14916 14914 un->un_waitq_tailp = NULL;
14917 14915 }
14918 14916 bp->av_forw = NULL;
14919 14917 statp = kstat_waitq_to_runq;
14920 14918 SD_TRACE(SD_LOG_IO_CORE, un,
14921 14919 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14922 14920
14923 14921 } else {
14924 14922 /* No work to do so bail out now */
14925 14923 SD_TRACE(SD_LOG_IO_CORE, un,
14926 14924 "sd_start_cmds: no more work, exiting!\n");
14927 14925 goto exit;
14928 14926 }
14929 14927
14930 14928 /*
14931 14929 * Reset the state to normal. This is the mechanism by which
14932 14930 * the state transitions from either SD_STATE_RWAIT or
14933 14931 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14934 14932 * If state is SD_STATE_PM_CHANGING then this command is
14935 14933 * part of the device power control and the state must
14936 14934 * not be put back to normal. Doing so would would
14937 14935 * allow new commands to proceed when they shouldn't,
14938 14936 * the device may be going off.
14939 14937 */
14940 14938 if ((un->un_state != SD_STATE_SUSPENDED) &&
14941 14939 (un->un_state != SD_STATE_PM_CHANGING)) {
14942 14940 New_state(un, SD_STATE_NORMAL);
14943 14941 }
14944 14942
14945 14943 xp = SD_GET_XBUF(bp);
14946 14944 ASSERT(xp != NULL);
14947 14945
14948 14946 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14949 14947 /*
14950 14948 * Allocate the scsi_pkt if we need one, or attach DMA
14951 14949 * resources if we have a scsi_pkt that needs them. The
14952 14950 * latter should only occur for commands that are being
14953 14951 * retried.
14954 14952 */
14955 14953 if ((xp->xb_pktp == NULL) ||
14956 14954 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14957 14955 #else
14958 14956 if (xp->xb_pktp == NULL) {
14959 14957 #endif
14960 14958 /*
14961 14959 * There is no scsi_pkt allocated for this buf. Call
14962 14960 * the initpkt function to allocate & init one.
14963 14961 *
14964 14962 * The scsi_init_pkt runout callback functionality is
14965 14963 * implemented as follows:
14966 14964 *
14967 14965 * 1) The initpkt function always calls
14968 14966 * scsi_init_pkt(9F) with sdrunout specified as the
14969 14967 * callback routine.
14970 14968 * 2) A successful packet allocation is initialized and
14971 14969 * the I/O is transported.
14972 14970 * 3) The I/O associated with an allocation resource
14973 14971 * failure is left on its queue to be retried via
14974 14972 * runout or the next I/O.
14975 14973 * 4) The I/O associated with a DMA error is removed
14976 14974 * from the queue and failed with EIO. Processing of
14977 14975 * the transport queues is also halted to be
14978 14976 * restarted via runout or the next I/O.
14979 14977 * 5) The I/O associated with a CDB size or packet
14980 14978 * size error is removed from the queue and failed
14981 14979 * with EIO. Processing of the transport queues is
14982 14980 * continued.
14983 14981 *
14984 14982 * Note: there is no interface for canceling a runout
14985 14983 * callback. To prevent the driver from detaching or
14986 14984 * suspending while a runout is pending the driver
14987 14985 * state is set to SD_STATE_RWAIT
14988 14986 *
14989 14987 * Note: using the scsi_init_pkt callback facility can
14990 14988 * result in an I/O request persisting at the head of
14991 14989 * the list which cannot be satisfied even after
14992 14990 * multiple retries. In the future the driver may
14993 14991 * implement some kind of maximum runout count before
14994 14992 * failing an I/O.
14995 14993 *
14996 14994 * Note: the use of funcp below may seem superfluous,
14997 14995 * but it helps warlock figure out the correct
14998 14996 * initpkt function calls (see [s]sd.wlcmd).
14999 14997 */
15000 14998 struct scsi_pkt *pktp;
15001 14999 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);
15002 15000
15003 15001 ASSERT(bp != un->un_rqs_bp);
15004 15002
15005 15003 funcp = sd_initpkt_map[xp->xb_chain_iostart];
15006 15004 switch ((*funcp)(bp, &pktp)) {
15007 15005 case SD_PKT_ALLOC_SUCCESS:
15008 15006 xp->xb_pktp = pktp;
15009 15007 SD_TRACE(SD_LOG_IO_CORE, un,
15010 15008 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
15011 15009 pktp);
15012 15010 goto got_pkt;
15013 15011
15014 15012 case SD_PKT_ALLOC_FAILURE:
15015 15013 /*
15016 15014 * Temporary (hopefully) resource depletion.
15017 15015 * Since retries and RQS commands always have a
15018 15016 * scsi_pkt allocated, these cases should never
15019 15017 * get here. So the only cases this needs to
15020 15018 * handle is a bp from the waitq (which we put
15021 15019 * back onto the waitq for sdrunout), or a bp
15022 15020 * sent as an immed_bp (which we just fail).
15023 15021 */
15024 15022 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15025 15023 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
15026 15024
15027 15025 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15028 15026
15029 15027 if (bp == immed_bp) {
15030 15028 /*
15031 15029 * If SD_XB_DMA_FREED is clear, then
15032 15030 * this is a failure to allocate a
15033 15031 * scsi_pkt, and we must fail the
15034 15032 * command.
15035 15033 */
15036 15034 if ((xp->xb_pkt_flags &
15037 15035 SD_XB_DMA_FREED) == 0) {
15038 15036 break;
15039 15037 }
15040 15038
15041 15039 /*
15042 15040 * If this immediate command is NOT our
15043 15041 * un_retry_bp, then we must fail it.
15044 15042 */
15045 15043 if (bp != un->un_retry_bp) {
15046 15044 break;
15047 15045 }
15048 15046
15049 15047 /*
15050 15048 * We get here if this cmd is our
15051 15049 * un_retry_bp that was DMAFREED, but
15052 15050 * scsi_init_pkt() failed to reallocate
15053 15051 * DMA resources when we attempted to
15054 15052 * retry it. This can happen when an
15055 15053 * mpxio failover is in progress, but
15056 15054 * we don't want to just fail the
15057 15055 * command in this case.
15058 15056 *
15059 15057 * Use timeout(9F) to restart it after
15060 15058 * a 100ms delay. We don't want to
15061 15059 * let sdrunout() restart it, because
15062 15060 * sdrunout() is just supposed to start
15063 15061 * commands that are sitting on the
15064 15062 * wait queue. The un_retry_bp stays
15065 15063 * set until the command completes, but
15066 15064 * sdrunout can be called many times
15067 15065 * before that happens. Since sdrunout
15068 15066 * cannot tell if the un_retry_bp is
15069 15067 * already in the transport, it could
15070 15068 * end up calling scsi_transport() for
15071 15069 * the un_retry_bp multiple times.
15072 15070 *
15073 15071 * Also: don't schedule the callback
15074 15072 * if some other callback is already
15075 15073 * pending.
15076 15074 */
15077 15075 if (un->un_retry_statp == NULL) {
15078 15076 /*
15079 15077 * restore the kstat pointer to
15080 15078 * keep kstat counts coherent
15081 15079 * when we do retry the command.
15082 15080 */
15083 15081 un->un_retry_statp =
15084 15082 saved_statp;
15085 15083 }
15086 15084
15087 15085 if ((un->un_startstop_timeid == NULL) &&
15088 15086 (un->un_retry_timeid == NULL) &&
15089 15087 (un->un_direct_priority_timeid ==
15090 15088 NULL)) {
15091 15089
15092 15090 un->un_retry_timeid =
15093 15091 timeout(
15094 15092 sd_start_retry_command,
15095 15093 un, SD_RESTART_TIMEOUT);
15096 15094 }
15097 15095 goto exit;
15098 15096 }
15099 15097
15100 15098 #else
15101 15099 if (bp == immed_bp) {
15102 15100 break; /* Just fail the command */
15103 15101 }
15104 15102 #endif
15105 15103
15106 15104 /* Add the buf back to the head of the waitq */
15107 15105 bp->av_forw = un->un_waitq_headp;
15108 15106 un->un_waitq_headp = bp;
15109 15107 if (un->un_waitq_tailp == NULL) {
15110 15108 un->un_waitq_tailp = bp;
15111 15109 }
15112 15110 goto exit;
15113 15111
15114 15112 case SD_PKT_ALLOC_FAILURE_NO_DMA:
15115 15113 /*
15116 15114 * HBA DMA resource failure. Fail the command
15117 15115 * and continue processing of the queues.
15118 15116 */
15119 15117 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15120 15118 "sd_start_cmds: "
15121 15119 "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
15122 15120 break;
15123 15121
15124 15122 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
15125 15123 /*
15126 15124 * Note:x86: Partial DMA mapping not supported
15127 15125 * for USCSI commands, and all the needed DMA
15128 15126 * resources were not allocated.
15129 15127 */
15130 15128 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15131 15129 "sd_start_cmds: "
15132 15130 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
15133 15131 break;
15134 15132
15135 15133 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
15136 15134 /*
15137 15135 * Note:x86: Request cannot fit into CDB based
15138 15136 * on lba and len.
15139 15137 */
15140 15138 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15141 15139 "sd_start_cmds: "
15142 15140 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
15143 15141 break;
15144 15142
15145 15143 default:
15146 15144 /* Should NEVER get here! */
15147 15145 panic("scsi_initpkt error");
15148 15146 /*NOTREACHED*/
15149 15147 }
15150 15148
15151 15149 /*
15152 15150 * Fatal error in allocating a scsi_pkt for this buf.
15153 15151 * Update kstats & return the buf with an error code.
15154 15152 * We must use sd_return_failed_command_no_restart() to
15155 15153 * avoid a recursive call back into sd_start_cmds().
15156 15154 * However this also means that we must keep processing
15157 15155 * the waitq here in order to avoid stalling.
15158 15156 */
15159 15157 if (statp == kstat_waitq_to_runq) {
15160 15158 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
15161 15159 }
15162 15160 sd_return_failed_command_no_restart(un, bp, EIO);
15163 15161 if (bp == immed_bp) {
15164 15162 /* immed_bp is gone by now, so clear this */
15165 15163 immed_bp = NULL;
15166 15164 }
15167 15165 continue;
15168 15166 }
15169 15167 got_pkt:
15170 15168 if (bp == immed_bp) {
15171 15169 /* goto the head of the class.... */
15172 15170 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15173 15171 }
15174 15172
15175 15173 un->un_ncmds_in_transport++;
15176 15174 SD_UPDATE_KSTATS(un, statp, bp);
15177 15175
15178 15176 /*
15179 15177 * Call scsi_transport() to send the command to the target.
15180 15178 * According to SCSA architecture, we must drop the mutex here
15181 15179 * before calling scsi_transport() in order to avoid deadlock.
15182 15180 * Note that the scsi_pkt's completion routine can be executed
15183 15181 * (from interrupt context) even before the call to
15184 15182 * scsi_transport() returns.
15185 15183 */
15186 15184 SD_TRACE(SD_LOG_IO_CORE, un,
15187 15185 "sd_start_cmds: calling scsi_transport()\n");
15188 15186 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);
15189 15187
15190 15188 mutex_exit(SD_MUTEX(un));
15191 15189 rval = scsi_transport(xp->xb_pktp);
15192 15190 mutex_enter(SD_MUTEX(un));
15193 15191
15194 15192 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15195 15193 "sd_start_cmds: scsi_transport() returned %d\n", rval);
15196 15194
15197 15195 switch (rval) {
15198 15196 case TRAN_ACCEPT:
15199 15197 /* Clear this with every pkt accepted by the HBA */
15200 15198 un->un_tran_fatal_count = 0;
15201 15199 break; /* Success; try the next cmd (if any) */
15202 15200
15203 15201 case TRAN_BUSY:
15204 15202 un->un_ncmds_in_transport--;
15205 15203 ASSERT(un->un_ncmds_in_transport >= 0);
15206 15204
15207 15205 /*
15208 15206 * Don't retry request sense, the sense data
15209 15207 * is lost when another request is sent.
15210 15208 * Free up the rqs buf and retry
15211 15209 * the original failed cmd. Update kstat.
15212 15210 */
15213 15211 if (bp == un->un_rqs_bp) {
15214 15212 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15215 15213 bp = sd_mark_rqs_idle(un, xp);
15216 15214 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15217 15215 NULL, NULL, EIO, un->un_busy_timeout / 500,
15218 15216 kstat_waitq_enter);
15219 15217 goto exit;
15220 15218 }
15221 15219
15222 15220 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15223 15221 /*
15224 15222 * Free the DMA resources for the scsi_pkt. This will
15225 15223 * allow mpxio to select another path the next time
15226 15224 * we call scsi_transport() with this scsi_pkt.
15227 15225 * See sdintr() for the rationalization behind this.
15228 15226 */
15229 15227 if ((un->un_f_is_fibre == TRUE) &&
15230 15228 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
15231 15229 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
15232 15230 scsi_dmafree(xp->xb_pktp);
15233 15231 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
15234 15232 }
15235 15233 #endif
15236 15234
15237 15235 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
15238 15236 /*
15239 15237 * Commands that are SD_PATH_DIRECT_PRIORITY
15240 15238 * are for error recovery situations. These do
15241 15239 * not use the normal command waitq, so if they
15242 15240 * get a TRAN_BUSY we cannot put them back onto
15243 15241 * the waitq for later retry. One possible
15244 15242 * problem is that there could already be some
15245 15243 * other command on un_retry_bp that is waiting
15246 15244 * for this one to complete, so we would be
15247 15245 * deadlocked if we put this command back onto
15248 15246 * the waitq for later retry (since un_retry_bp
15249 15247 * must complete before the driver gets back to
15250 15248 * commands on the waitq).
15251 15249 *
15252 15250 * To avoid deadlock we must schedule a callback
15253 15251 * that will restart this command after a set
15254 15252 * interval. This should keep retrying for as
15255 15253 * long as the underlying transport keeps
15256 15254 * returning TRAN_BUSY (just like for other
15257 15255 * commands). Use the same timeout interval as
15258 15256 * for the ordinary TRAN_BUSY retry.
15259 15257 */
15260 15258 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15261 15259 "sd_start_cmds: scsi_transport() returned "
15262 15260 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");
15263 15261
15264 15262 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15265 15263 un->un_direct_priority_timeid =
15266 15264 timeout(sd_start_direct_priority_command,
15267 15265 bp, un->un_busy_timeout / 500);
15268 15266
15269 15267 goto exit;
15270 15268 }
15271 15269
15272 15270 /*
15273 15271 * For TRAN_BUSY, we want to reduce the throttle value,
15274 15272 * unless we are retrying a command.
15275 15273 */
15276 15274 if (bp != un->un_retry_bp) {
15277 15275 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
15278 15276 }
15279 15277
15280 15278 /*
15281 15279 * Set up the bp to be tried again 10 ms later.
15282 15280 * Note:x86: Is there a timeout value in the sd_lun
15283 15281 * for this condition?
15284 15282 */
15285 15283 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
15286 15284 kstat_runq_back_to_waitq);
15287 15285 goto exit;
15288 15286
15289 15287 case TRAN_FATAL_ERROR:
15290 15288 un->un_tran_fatal_count++;
15291 15289 /* FALLTHRU */
15292 15290
15293 15291 case TRAN_BADPKT:
15294 15292 default:
15295 15293 un->un_ncmds_in_transport--;
15296 15294 ASSERT(un->un_ncmds_in_transport >= 0);
15297 15295
15298 15296 /*
15299 15297 * If this is our REQUEST SENSE command with a
15300 15298 * transport error, we must get back the pointers
15301 15299 * to the original buf, and mark the REQUEST
15302 15300 * SENSE command as "available".
15303 15301 */
15304 15302 if (bp == un->un_rqs_bp) {
15305 15303 bp = sd_mark_rqs_idle(un, xp);
15306 15304 xp = SD_GET_XBUF(bp);
15307 15305 } else {
15308 15306 /*
15309 15307 * Legacy behavior: do not update transport
15310 15308 * error count for request sense commands.
15311 15309 */
15312 15310 SD_UPDATE_ERRSTATS(un, sd_transerrs);
15313 15311 }
15314 15312
15315 15313 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15316 15314 sd_print_transport_rejected_message(un, xp, rval);
15317 15315
15318 15316 /*
15319 15317 * This command will be terminated by SD driver due
15320 15318 * to a fatal transport error. We should post
15321 15319 * ereport.io.scsi.cmd.disk.tran with driver-assessment
15322 15320 * of "fail" for any command to indicate this
15323 15321 * situation.
15324 15322 */
15325 15323 if (xp->xb_ena > 0) {
15326 15324 ASSERT(un->un_fm_private != NULL);
15327 15325 sfip = un->un_fm_private;
15328 15326 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT;
15329 15327 sd_ssc_extract_info(&sfip->fm_ssc, un,
15330 15328 xp->xb_pktp, bp, xp);
15331 15329 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15332 15330 }
15333 15331
15334 15332 /*
15335 15333 * We must use sd_return_failed_command_no_restart() to
15336 15334 * avoid a recursive call back into sd_start_cmds().
15337 15335 * However this also means that we must keep processing
15338 15336 * the waitq here in order to avoid stalling.
15339 15337 */
15340 15338 sd_return_failed_command_no_restart(un, bp, EIO);
15341 15339
15342 15340 /*
15343 15341 * Notify any threads waiting in sd_ddi_suspend() that
15344 15342 * a command completion has occurred.
15345 15343 */
15346 15344 if (un->un_state == SD_STATE_SUSPENDED) {
15347 15345 cv_broadcast(&un->un_disk_busy_cv);
15348 15346 }
15349 15347
15350 15348 if (bp == immed_bp) {
15351 15349 /* immed_bp is gone by now, so clear this */
15352 15350 immed_bp = NULL;
15353 15351 }
15354 15352 break;
15355 15353 }
15356 15354
15357 15355 } while (immed_bp == NULL);
15358 15356
15359 15357 exit:
15360 15358 ASSERT(mutex_owned(SD_MUTEX(un)));
15361 15359 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
15362 15360 }
15363 15361
15364 15362
/*
 * Function: sd_return_command
 *
 * Description: Returns a command to its originator (with or without an
 *		error).  Also starts commands waiting to be transported
 *		to the target.
 *
 * Context: May be called from interrupt, kernel, or timeout context
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	/* The REQUEST SENSE bp has its own completion path (sd_mark_rqs_idle) */
	ASSERT(bp != un->un_rqs_bp);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

	/*
	 * Note: check for the "sdrestart failed" case: a partial-DMA
	 * command completed its current window cleanly (no error, zero
	 * pkt_resid) but still has untransferred data (xb_dma_resid != 0),
	 * so set up and transport the next window instead of completing.
	 */
	if ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0)) {

		if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
			/*
			 * Successfully set up next portion of cmd
			 * transfer, try sending it
			 */
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, (clock_t)0, NULL);
			sd_start_cmds(un, NULL);
			return;	/* Note:x86: need a return here? */
		}
	}

	/*
	 * If this is the failfast bp, clear it from un_failfast_bp. This
	 * can happen if upon being re-tried the failfast bp either
	 * succeeded or encountered another error (possibly even a different
	 * error than the one that precipitated the failfast state, but in
	 * that case it would have had to exhaust retries as well). Regardless,
	 * this should not occur whenever the instance is in the active
	 * failfast state.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	/*
	 * Clear the failfast state upon successful completion of ANY cmd.
	 */
	if (bp->b_error == 0) {
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
		/*
		 * If this is a successful command, but used to be retried,
		 * we will take it as a recovered command and post an
		 * ereport with driver-assessment of "recovered".
		 */
		if (xp->xb_ena > 0) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
		}
	} else {
		/*
		 * If this is a failed non-USCSI command we will post an
		 * ereport with driver-assessment set accordingly("fail" or
		 * "fatal").
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
		}
	}

	/*
	 * This is used if the command was retried one or more times. Show that
	 * we are done with it, and allow processing of the waitq to resume.
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/*
	 * Return this command up the iodone chain to its originator.
	 * The softstate mutex must be dropped across the iodone callbacks.
	 */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}
15496 15494
15497 15495
15498 15496 /*
15499 15497 * Function: sd_return_failed_command
15500 15498 *
15501 15499 * Description: Command completion when an error occurred.
15502 15500 *
15503 15501 * Context: May be called from interrupt context
15504 15502 */
15505 15503
15506 15504 static void
15507 15505 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
15508 15506 {
15509 15507 ASSERT(bp != NULL);
15510 15508 ASSERT(un != NULL);
15511 15509 ASSERT(mutex_owned(SD_MUTEX(un)));
15512 15510
15513 15511 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15514 15512 "sd_return_failed_command: entry\n");
15515 15513
15516 15514 /*
15517 15515 * b_resid could already be nonzero due to a partial data
15518 15516 * transfer, so do not change it here.
15519 15517 */
15520 15518 SD_BIOERROR(bp, errcode);
15521 15519
15522 15520 sd_return_command(un, bp);
15523 15521 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15524 15522 "sd_return_failed_command: exit\n");
15525 15523 }
15526 15524
15527 15525
/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure).  However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	/* Drop the softstate mutex across the destroy/iodone callbacks. */
	mutex_exit(SD_MUTEX(un));

	/*
	 * The pkt may be absent if the command failed before a scsi_pkt
	 * was ever allocated/initialized for it.
	 */
	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}
15600 15598
15601 15599
/*
 * Function: sd_retry_command
 *
 * Description: queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *		bp - Pointer to the buf for the command to be retried.
 *
 *		retry_check_flag - Flag to see which (if any) of the retry
 *		counts should be decremented/checked. If the indicated
 *		retry count is exhausted, then the command will not be
 *		retried; it will be failed instead. This should use a
 *		value equal to one of the following:
 *
 *			SD_RETRIES_NOCHECK
 *			SD_RETRIES_STANDARD
 *			SD_RETRIES_VICTIM
 *
 *		Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *		if the check should be made to see if FLAG_ISOLATE is set
 *		in the pkt. If FLAG_ISOLATE is set, then the command is
 *		not retried, it is simply failed.
 *
 *		user_funcp - Ptr to function to call before dispatching the
 *		command. May be NULL if no action needs to be performed.
 *		(Primarily intended for printing messages.)
 *
 *		user_arg - Optional argument to be passed along to
 *		the user_funcp call.
 *
 *		failure_code - errno return code to set in the bp if the
 *		command is going to be failed.
 *
 *		retry_delay - Retry delay interval in (clock_t) units. May
 *		be zero which indicates that the retry should be retried
 *		immediately (ie, without an intervening delay).
 *
 *		statp - Ptr to kstat function to be updated if the command
 *		is queued for a delayed retry. May be NULL if no kstat
 *		update is desired.
 *
 * Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
    code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended, then put the command onto head of the
	 * wait queue since we don't want to start more commands, and
	 * clear the un_retry_bp. Next time when we are resumed, will
	 * handle the command in the wait queue.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		if (bp == un->un_retry_bp) {
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target and subsequent commands
		 * and/or retries are likely to get through to the target,
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}


	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. "
			    "Check the target.\n");
			goto fail_command;
		}
		xp->xb_ua_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n",
		    xp->xb_ua_retry_count);
		break;

	case SD_RETRIES_BUSY:
		if (xp->xb_retry_count >= un->un_busy_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_NOCHECK:
	default:
		/* No retry count to check. Just proceed with the retry */
		break;
	}

	/* Retried commands are dispatched ahead of the normal wait queue. */
	xp->xb_pktp->pkt_flags |= FLAG_HEAD;

	/*
	 * If this is a non-USCSI command being retried
	 * during execution last time, we should post an ereport with
	 * driver-assessment of the value "retry".
	 * For partial DMA, request sense and STATUS_QFULL, there are no
	 * hardware errors, we bypass ereport posting.
	 */
	if (failure_code != 0) {
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
		}
	}

	/*
	 * If we were given a zero timeout, we must attempt to retry the
	 * command immediately (ie, without a delay).
	 */
	if (retry_delay == 0) {
		/*
		 * Check some limiting conditions to see if we can actually
		 * do the immediate retry.  If we cannot, then we must
		 * fall back to queueing up a delayed retry.
		 */
		if (un->un_ncmds_in_transport >= un->un_throttle) {
			/*
			 * We are at the throttle limit for the target,
			 * fall back to delayed retry.
			 */
			retry_delay = un->un_busy_timeout;
			statp = kstat_waitq_enter;
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: immed. retry hit "
			    "throttle!\n");
		} else {
			/*
			 * We're clear to proceed with the immediate retry.
			 * First call the user-provided function (if any)
			 */
			if (user_funcp != NULL) {
				(*user_funcp)(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#ifdef __lock_lint
				sd_print_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_cmd_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_sense_failed_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#endif
			}

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: issuing immediate retry\n");

			/*
			 * Call sd_start_cmds() to transport the command to
			 * the target.
			 */
			sd_start_cmds(un, bp);

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command exit\n");
			return;
		}
	}

	/*
	 * Set up to retry the command after a delay.
	 * First call the user-provided function (if any)
	 */
	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
	}

	sd_set_retry_bp(un, bp, retry_delay, statp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
	return;

fail_command:

	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
	}

fail_command_no_log:

	SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_retry_command: returning failed command\n");

	sd_return_failed_command(un, bp, failure_code);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}
15999 15997
16000 15998
/*
 * Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		retry_delay - time interval before issuing retry (may be 0)
 *		statp - optional pointer to kstat function
 *
 * Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

	/*
	 * Indicate that the command is being retried. This will not allow any
	 * other commands on the wait queue to be transported to the target
	 * until this command has been completed (success or failure). The
	 * "retry command" is not transported to the target until the given
	 * time delay expires, unless the user specified a 0 retry_delay.
	 *
	 * Note: the timeout(9F) callback routine is what actually calls
	 * sd_start_cmds() to transport the command, with the exception of a
	 * zero retry_delay. The only current implementor of a zero retry delay
	 * is the case where a START_STOP_UNIT is sent to spin-up a device.
	 */
	if (un->un_retry_bp == NULL) {
		ASSERT(un->un_retry_statp == NULL);
		un->un_retry_bp = bp;

		/*
		 * If the user has not specified a delay the command should
		 * be queued and no timeout should be scheduled.
		 */
		if (retry_delay == 0) {
			/*
			 * Save the kstat pointer that will be used in the
			 * call to SD_UPDATE_KSTATS() below, so that
			 * sd_start_cmds() can correctly decrement the waitq
			 * count when it is time to transport this command.
			 */
			un->un_retry_statp = statp;
			goto done;
		}
	}

	if (un->un_retry_bp == bp) {
		/*
		 * Save the kstat pointer that will be used in the call to
		 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
		 * correctly decrement the waitq count when it is time to
		 * transport this command.
		 */
		un->un_retry_statp = statp;

		/*
		 * Schedule a timeout if:
		 *   1) The user has specified a delay.
		 *   2) There is not a START_STOP_UNIT callback pending.
		 *
		 * If no delay has been specified, then it is up to the caller
		 * to ensure that IO processing continues without stalling.
		 * Effectively, this means that the caller will issue the
		 * required call to sd_start_cmds(). The START_STOP_UNIT
		 * callback does this after the START STOP UNIT command has
		 * completed. In either of these cases we should not schedule
		 * a timeout callback here.  Also don't schedule the timeout if
		 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
		 */
		if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
		    (un->un_direct_priority_timeid == NULL)) {
			un->un_retry_timeid =
			    timeout(sd_start_retry_command, un, retry_delay);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_set_retry_bp: setting timeout: un: 0x%p"
			    " bp:0x%p un_retry_timeid:0x%p\n",
			    un, bp, un->un_retry_timeid);
		}
	} else {
		/*
		 * We only get in here if there is already another command
		 * waiting to be retried.  In this case, we just put the
		 * given command onto the wait queue, so it can be transported
		 * after the current retry command has completed.
		 *
		 * Also we have to make sure that if the command at the head
		 * of the wait queue is the un_failfast_bp, that we do not
		 * put ahead of it any other commands that are to be retried.
		 */
		if ((un->un_failfast_bp != NULL) &&
		    (un->un_failfast_bp == un->un_waitq_headp)) {
			/*
			 * Enqueue this command AFTER the first command on
			 * the wait queue (which is also un_failfast_bp).
			 */
			bp->av_forw = un->un_waitq_headp->av_forw;
			un->un_waitq_headp->av_forw = bp;
			if (un->un_waitq_headp == un->un_waitq_tailp) {
				un->un_waitq_tailp = bp;
			}
		} else {
			/* Enqueue this command at the head of the waitq. */
			bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = bp;
			}
		}

		if (statp == NULL) {
			statp = kstat_waitq_enter;
		}
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
	}

done:
	/* Update the kstat (if any) now that the bp's disposition is known. */
	if (statp != NULL) {
		SD_UPDATE_KSTATS(un, statp, bp);
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: exit un:0x%p\n", un);
}
16135 16133
16136 16134
16137 16135 /*
16138 16136 * Function: sd_start_retry_command
16139 16137 *
16140 16138 * Description: Start the command that has been waiting on the target's
16141 16139 * retry queue. Called from timeout(9F) context after the
16142 16140 * retry delay interval has expired.
16143 16141 *
16144 16142 * Arguments: arg - pointer to associated softstate for the device.
16145 16143 *
16146 16144 * Context: timeout(9F) thread context. May not sleep.
16147 16145 */
16148 16146
16149 16147 static void
16150 16148 sd_start_retry_command(void *arg)
16151 16149 {
16152 16150 struct sd_lun *un = arg;
16153 16151
16154 16152 ASSERT(un != NULL);
16155 16153 ASSERT(!mutex_owned(SD_MUTEX(un)));
16156 16154
16157 16155 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16158 16156 "sd_start_retry_command: entry\n");
16159 16157
16160 16158 mutex_enter(SD_MUTEX(un));
16161 16159
16162 16160 un->un_retry_timeid = NULL;
16163 16161
16164 16162 if (un->un_retry_bp != NULL) {
16165 16163 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16166 16164 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
16167 16165 un, un->un_retry_bp);
16168 16166 sd_start_cmds(un, un->un_retry_bp);
16169 16167 }
16170 16168
16171 16169 mutex_exit(SD_MUTEX(un));
16172 16170
16173 16171 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16174 16172 "sd_start_retry_command: exit\n");
16175 16173 }
16176 16174
16177 16175 /*
16178 16176 * Function: sd_rmw_msg_print_handler
16179 16177 *
16180 16178 * Description: If RMW mode is enabled and warning message is triggered
16181 16179 * print I/O count during a fixed interval.
16182 16180 *
16183 16181 * Arguments: arg - pointer to associated softstate for the device.
16184 16182 *
16185 16183 * Context: timeout(9F) thread context. May not sleep.
16186 16184 */
16187 16185 static void
16188 16186 sd_rmw_msg_print_handler(void *arg)
16189 16187 {
16190 16188 struct sd_lun *un = arg;
16191 16189
16192 16190 ASSERT(un != NULL);
16193 16191 ASSERT(!mutex_owned(SD_MUTEX(un)));
16194 16192
16195 16193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16196 16194 "sd_rmw_msg_print_handler: entry\n");
16197 16195
16198 16196 mutex_enter(SD_MUTEX(un));
16199 16197
16200 16198 if (un->un_rmw_incre_count > 0) {
16201 16199 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16202 16200 "%"PRIu64" I/O requests are not aligned with %d disk "
16203 16201 "sector size in %ld seconds. They are handled through "
16204 16202 "Read Modify Write but the performance is very low!\n",
16205 16203 un->un_rmw_incre_count, un->un_tgt_blocksize,
16206 16204 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000);
16207 16205 un->un_rmw_incre_count = 0;
16208 16206 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
16209 16207 un, SD_RMW_MSG_PRINT_TIMEOUT);
16210 16208 } else {
16211 16209 un->un_rmw_msg_timeid = NULL;
16212 16210 }
16213 16211
16214 16212 mutex_exit(SD_MUTEX(un));
16215 16213
16216 16214 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16217 16215 "sd_rmw_msg_print_handler: exit\n");
16218 16216 }
16219 16217
16220 16218 /*
16221 16219 * Function: sd_start_direct_priority_command
16222 16220 *
16223 16221 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
16224 16222 * received TRAN_BUSY when we called scsi_transport() to send it
16225 16223 * to the underlying HBA. This function is called from timeout(9F)
16226 16224 * context after the delay interval has expired.
16227 16225 *
16228 16226 * Arguments: arg - pointer to associated buf(9S) to be restarted.
16229 16227 *
16230 16228 * Context: timeout(9F) thread context. May not sleep.
16231 16229 */
16232 16230
16233 16231 static void
16234 16232 sd_start_direct_priority_command(void *arg)
16235 16233 {
16236 16234 struct buf *priority_bp = arg;
16237 16235 struct sd_lun *un;
16238 16236
16239 16237 ASSERT(priority_bp != NULL);
16240 16238 un = SD_GET_UN(priority_bp);
16241 16239 ASSERT(un != NULL);
16242 16240 ASSERT(!mutex_owned(SD_MUTEX(un)));
16243 16241
16244 16242 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16245 16243 "sd_start_direct_priority_command: entry\n");
16246 16244
16247 16245 mutex_enter(SD_MUTEX(un));
16248 16246 un->un_direct_priority_timeid = NULL;
16249 16247 sd_start_cmds(un, priority_bp);
16250 16248 mutex_exit(SD_MUTEX(un));
16251 16249
16252 16250 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16253 16251 "sd_start_direct_priority_command: exit\n");
16254 16252 }
16255 16253
16256 16254
16257 16255 /*
16258 16256 * Function: sd_send_request_sense_command
16259 16257 *
16260 16258 * Description: Sends a REQUEST SENSE command to the target
16261 16259 *
16262 16260 * Context: May be called from interrupt context.
16263 16261 */
16264 16262
16265 16263 static void
16266 16264 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
16267 16265 struct scsi_pkt *pktp)
16268 16266 {
16269 16267 ASSERT(bp != NULL);
16270 16268 ASSERT(un != NULL);
16271 16269 ASSERT(mutex_owned(SD_MUTEX(un)));
16272 16270
16273 16271 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
16274 16272 "entry: buf:0x%p\n", bp);
16275 16273
16276 16274 /*
16277 16275 * If we are syncing or dumping, then fail the command to avoid a
16278 16276 * recursive callback into scsi_transport(). Also fail the command
16279 16277 * if we are suspended (legacy behavior).
16280 16278 */
16281 16279 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
16282 16280 (un->un_state == SD_STATE_DUMPING)) {
16283 16281 sd_return_failed_command(un, bp, EIO);
16284 16282 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16285 16283 "sd_send_request_sense_command: syncing/dumping, exit\n");
16286 16284 return;
16287 16285 }
16288 16286
16289 16287 /*
16290 16288 * Retry the failed command and don't issue the request sense if:
16291 16289 * 1) the sense buf is busy
16292 16290 * 2) we have 1 or more outstanding commands on the target
16293 16291 * (the sense data will be cleared or invalidated any way)
16294 16292 *
16295 16293 * Note: There could be an issue with not checking a retry limit here,
16296 16294 * the problem is determining which retry limit to check.
16297 16295 */
16298 16296 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
16299 16297 /* Don't retry if the command is flagged as non-retryable */
16300 16298 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
16301 16299 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
16302 16300 NULL, NULL, 0, un->un_busy_timeout,
16303 16301 kstat_waitq_enter);
16304 16302 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16305 16303 "sd_send_request_sense_command: "
16306 16304 "at full throttle, retrying exit\n");
16307 16305 } else {
16308 16306 sd_return_failed_command(un, bp, EIO);
16309 16307 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16310 16308 "sd_send_request_sense_command: "
16311 16309 "at full throttle, non-retryable exit\n");
16312 16310 }
16313 16311 return;
16314 16312 }
16315 16313
16316 16314 sd_mark_rqs_busy(un, bp);
16317 16315 sd_start_cmds(un, un->un_rqs_bp);
16318 16316
16319 16317 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16320 16318 "sd_send_request_sense_command: exit\n");
16321 16319 }
16322 16320
16323 16321
16324 16322 /*
16325 16323 * Function: sd_mark_rqs_busy
16326 16324 *
16327 16325 * Description: Indicate that the request sense bp for this instance is
16328 16326 * in use.
16329 16327 *
16330 16328 * Context: May be called under interrupt context
16331 16329 */
16332 16330
16333 16331 static void
16334 16332 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
16335 16333 {
16336 16334 struct sd_xbuf *sense_xp;
16337 16335
16338 16336 ASSERT(un != NULL);
16339 16337 ASSERT(bp != NULL);
16340 16338 ASSERT(mutex_owned(SD_MUTEX(un)));
16341 16339 ASSERT(un->un_sense_isbusy == 0);
16342 16340
16343 16341 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
16344 16342 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
16345 16343
16346 16344 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
16347 16345 ASSERT(sense_xp != NULL);
16348 16346
16349 16347 SD_INFO(SD_LOG_IO, un,
16350 16348 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
16351 16349
16352 16350 ASSERT(sense_xp->xb_pktp != NULL);
16353 16351 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
16354 16352 == (FLAG_SENSING | FLAG_HEAD));
16355 16353
16356 16354 un->un_sense_isbusy = 1;
16357 16355 un->un_rqs_bp->b_resid = 0;
16358 16356 sense_xp->xb_pktp->pkt_resid = 0;
16359 16357 sense_xp->xb_pktp->pkt_reason = 0;
16360 16358
16361 16359 /* So we can get back the bp at interrupt time! */
16362 16360 sense_xp->xb_sense_bp = bp;
16363 16361
16364 16362 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
16365 16363
16366 16364 /*
16367 16365 * Mark this buf as awaiting sense data. (This is already set in
16368 16366 * the pkt_flags for the RQS packet.)
16369 16367 */
16370 16368 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
16371 16369
16372 16370 /* Request sense down same path */
16373 16371 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
16374 16372 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
16375 16373 sense_xp->xb_pktp->pkt_path_instance =
16376 16374 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;
16377 16375
16378 16376 sense_xp->xb_retry_count = 0;
16379 16377 sense_xp->xb_victim_retry_count = 0;
16380 16378 sense_xp->xb_ua_retry_count = 0;
16381 16379 sense_xp->xb_nr_retry_count = 0;
16382 16380 sense_xp->xb_dma_resid = 0;
16383 16381
16384 16382 /* Clean up the fields for auto-request sense */
16385 16383 sense_xp->xb_sense_status = 0;
16386 16384 sense_xp->xb_sense_state = 0;
16387 16385 sense_xp->xb_sense_resid = 0;
16388 16386 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
16389 16387
16390 16388 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
16391 16389 }
16392 16390
16393 16391
16394 16392 /*
16395 16393 * Function: sd_mark_rqs_idle
16396 16394 *
16397 16395 * Description: SD_MUTEX must be held continuously through this routine
16398 16396 * to prevent reuse of the rqs struct before the caller can
16399 16397 * complete it's processing.
16400 16398 *
16401 16399 * Return Code: Pointer to the RQS buf
16402 16400 *
16403 16401 * Context: May be called under interrupt context
16404 16402 */
16405 16403
16406 16404 static struct buf *
16407 16405 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
16408 16406 {
16409 16407 struct buf *bp;
16410 16408 ASSERT(un != NULL);
16411 16409 ASSERT(sense_xp != NULL);
16412 16410 ASSERT(mutex_owned(SD_MUTEX(un)));
16413 16411 ASSERT(un->un_sense_isbusy != 0);
16414 16412
16415 16413 un->un_sense_isbusy = 0;
16416 16414 bp = sense_xp->xb_sense_bp;
16417 16415 sense_xp->xb_sense_bp = NULL;
16418 16416
16419 16417 /* This pkt is no longer interested in getting sense data */
16420 16418 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
16421 16419
16422 16420 return (bp);
16423 16421 }
16424 16422
16425 16423
16426 16424
16427 16425 /*
16428 16426 * Function: sd_alloc_rqs
16429 16427 *
16430 16428 * Description: Set up the unit to receive auto request sense data
16431 16429 *
16432 16430 * Return Code: DDI_SUCCESS or DDI_FAILURE
16433 16431 *
16434 16432 * Context: Called under attach(9E) context
16435 16433 */
16436 16434
16437 16435 static int
16438 16436 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
16439 16437 {
16440 16438 struct sd_xbuf *xp;
16441 16439
16442 16440 ASSERT(un != NULL);
16443 16441 ASSERT(!mutex_owned(SD_MUTEX(un)));
16444 16442 ASSERT(un->un_rqs_bp == NULL);
16445 16443 ASSERT(un->un_rqs_pktp == NULL);
16446 16444
16447 16445 /*
16448 16446 * First allocate the required buf and scsi_pkt structs, then set up
16449 16447 * the CDB in the scsi_pkt for a REQUEST SENSE command.
16450 16448 */
16451 16449 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
16452 16450 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
16453 16451 if (un->un_rqs_bp == NULL) {
16454 16452 return (DDI_FAILURE);
16455 16453 }
16456 16454
16457 16455 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
16458 16456 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
16459 16457
16460 16458 if (un->un_rqs_pktp == NULL) {
16461 16459 sd_free_rqs(un);
16462 16460 return (DDI_FAILURE);
16463 16461 }
16464 16462
16465 16463 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
16466 16464 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
16467 16465 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
16468 16466
16469 16467 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
16470 16468
16471 16469 /* Set up the other needed members in the ARQ scsi_pkt. */
16472 16470 un->un_rqs_pktp->pkt_comp = sdintr;
16473 16471 un->un_rqs_pktp->pkt_time = sd_io_time;
16474 16472 un->un_rqs_pktp->pkt_flags |=
16475 16473 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
16476 16474
16477 16475 /*
16478 16476 * Allocate & init the sd_xbuf struct for the RQS command. Do not
16479 16477 * provide any intpkt, destroypkt routines as we take care of
16480 16478 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16481 16479 */
16482 16480 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16483 16481 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16484 16482 xp->xb_pktp = un->un_rqs_pktp;
16485 16483 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16486 16484 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16487 16485 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
16488 16486
16489 16487 /*
16490 16488 * Save the pointer to the request sense private bp so it can
16491 16489 * be retrieved in sdintr.
16492 16490 */
16493 16491 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
16494 16492 ASSERT(un->un_rqs_bp->b_private == xp);
16495 16493
16496 16494 /*
16497 16495 * See if the HBA supports auto-request sense for the specified
16498 16496 * target/lun. If it does, then try to enable it (if not already
16499 16497 * enabled).
16500 16498 *
16501 16499 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16502 16500 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16503 16501 * return success. However, in both of these cases ARQ is always
16504 16502 * enabled and scsi_ifgetcap will always return true. The best approach
16505 16503 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16506 16504 *
16507 16505 * The 3rd case is the HBA (adp) always return enabled on
16508 16506 * scsi_ifgetgetcap even when it's not enable, the best approach
16509 16507 * is issue a scsi_ifsetcap then a scsi_ifgetcap
16510 16508 * Note: this case is to circumvent the Adaptec bug. (x86 only)
16511 16509 */
16512 16510
16513 16511 if (un->un_f_is_fibre == TRUE) {
16514 16512 un->un_f_arq_enabled = TRUE;
16515 16513 } else {
16516 16514 #if defined(__i386) || defined(__amd64)
16517 16515 /*
16518 16516 * Circumvent the Adaptec bug, remove this code when
16519 16517 * the bug is fixed
16520 16518 */
16521 16519 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16522 16520 #endif
16523 16521 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16524 16522 case 0:
16525 16523 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16526 16524 "sd_alloc_rqs: HBA supports ARQ\n");
16527 16525 /*
16528 16526 * ARQ is supported by this HBA but currently is not
16529 16527 * enabled. Attempt to enable it and if successful then
16530 16528 * mark this instance as ARQ enabled.
16531 16529 */
16532 16530 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
16533 16531 == 1) {
16534 16532 /* Successfully enabled ARQ in the HBA */
16535 16533 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16536 16534 "sd_alloc_rqs: ARQ enabled\n");
16537 16535 un->un_f_arq_enabled = TRUE;
16538 16536 } else {
16539 16537 /* Could not enable ARQ in the HBA */
16540 16538 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16541 16539 "sd_alloc_rqs: failed ARQ enable\n");
16542 16540 un->un_f_arq_enabled = FALSE;
16543 16541 }
16544 16542 break;
16545 16543 case 1:
16546 16544 /*
16547 16545 * ARQ is supported by this HBA and is already enabled.
16548 16546 * Just mark ARQ as enabled for this instance.
16549 16547 */
16550 16548 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16551 16549 "sd_alloc_rqs: ARQ already enabled\n");
16552 16550 un->un_f_arq_enabled = TRUE;
16553 16551 break;
16554 16552 default:
16555 16553 /*
16556 16554 * ARQ is not supported by this HBA; disable it for this
16557 16555 * instance.
16558 16556 */
16559 16557 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16560 16558 "sd_alloc_rqs: HBA does not support ARQ\n");
16561 16559 un->un_f_arq_enabled = FALSE;
16562 16560 break;
16563 16561 }
16564 16562 }
16565 16563
16566 16564 return (DDI_SUCCESS);
16567 16565 }
16568 16566
16569 16567
16570 16568 /*
16571 16569 * Function: sd_free_rqs
16572 16570 *
16573 16571 * Description: Cleanup for the pre-instance RQS command.
16574 16572 *
16575 16573 * Context: Kernel thread context
16576 16574 */
16577 16575
16578 16576 static void
16579 16577 sd_free_rqs(struct sd_lun *un)
16580 16578 {
16581 16579 ASSERT(un != NULL);
16582 16580
16583 16581 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
16584 16582
16585 16583 /*
16586 16584 * If consistent memory is bound to a scsi_pkt, the pkt
16587 16585 * has to be destroyed *before* freeing the consistent memory.
16588 16586 * Don't change the sequence of this operations.
16589 16587 * scsi_destroy_pkt() might access memory, which isn't allowed,
16590 16588 * after it was freed in scsi_free_consistent_buf().
16591 16589 */
16592 16590 if (un->un_rqs_pktp != NULL) {
16593 16591 scsi_destroy_pkt(un->un_rqs_pktp);
16594 16592 un->un_rqs_pktp = NULL;
16595 16593 }
16596 16594
16597 16595 if (un->un_rqs_bp != NULL) {
16598 16596 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
16599 16597 if (xp != NULL) {
16600 16598 kmem_free(xp, sizeof (struct sd_xbuf));
16601 16599 }
16602 16600 scsi_free_consistent_buf(un->un_rqs_bp);
16603 16601 un->un_rqs_bp = NULL;
16604 16602 }
16605 16603 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
16606 16604 }
16607 16605
16608 16606
16609 16607
16610 16608 /*
16611 16609 * Function: sd_reduce_throttle
16612 16610 *
16613 16611 * Description: Reduces the maximum # of outstanding commands on a
16614 16612 * target to the current number of outstanding commands.
16615 16613 * Queues a tiemout(9F) callback to restore the limit
16616 16614 * after a specified interval has elapsed.
16617 16615 * Typically used when we get a TRAN_BUSY return code
16618 16616 * back from scsi_transport().
16619 16617 *
16620 16618 * Arguments: un - ptr to the sd_lun softstate struct
16621 16619 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
16622 16620 *
16623 16621 * Context: May be called from interrupt context
16624 16622 */
16625 16623
16626 16624 static void
16627 16625 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
16628 16626 {
16629 16627 ASSERT(un != NULL);
16630 16628 ASSERT(mutex_owned(SD_MUTEX(un)));
16631 16629 ASSERT(un->un_ncmds_in_transport >= 0);
16632 16630
16633 16631 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16634 16632 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
16635 16633 un, un->un_throttle, un->un_ncmds_in_transport);
16636 16634
16637 16635 if (un->un_throttle > 1) {
16638 16636 if (un->un_f_use_adaptive_throttle == TRUE) {
16639 16637 switch (throttle_type) {
16640 16638 case SD_THROTTLE_TRAN_BUSY:
16641 16639 if (un->un_busy_throttle == 0) {
16642 16640 un->un_busy_throttle = un->un_throttle;
16643 16641 }
16644 16642 break;
16645 16643 case SD_THROTTLE_QFULL:
16646 16644 un->un_busy_throttle = 0;
16647 16645 break;
16648 16646 default:
16649 16647 ASSERT(FALSE);
16650 16648 }
16651 16649
16652 16650 if (un->un_ncmds_in_transport > 0) {
16653 16651 un->un_throttle = un->un_ncmds_in_transport;
16654 16652 }
16655 16653
16656 16654 } else {
16657 16655 if (un->un_ncmds_in_transport == 0) {
16658 16656 un->un_throttle = 1;
16659 16657 } else {
16660 16658 un->un_throttle = un->un_ncmds_in_transport;
16661 16659 }
16662 16660 }
16663 16661 }
16664 16662
16665 16663 /* Reschedule the timeout if none is currently active */
16666 16664 if (un->un_reset_throttle_timeid == NULL) {
16667 16665 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
16668 16666 un, SD_THROTTLE_RESET_INTERVAL);
16669 16667 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16670 16668 "sd_reduce_throttle: timeout scheduled!\n");
16671 16669 }
16672 16670
16673 16671 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16674 16672 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16675 16673 }
16676 16674
16677 16675
16678 16676
16679 16677 /*
16680 16678 * Function: sd_restore_throttle
16681 16679 *
16682 16680 * Description: Callback function for timeout(9F). Resets the current
16683 16681 * value of un->un_throttle to its default.
16684 16682 *
16685 16683 * Arguments: arg - pointer to associated softstate for the device.
16686 16684 *
16687 16685 * Context: May be called from interrupt context
16688 16686 */
16689 16687
16690 16688 static void
16691 16689 sd_restore_throttle(void *arg)
16692 16690 {
16693 16691 struct sd_lun *un = arg;
16694 16692
16695 16693 ASSERT(un != NULL);
16696 16694 ASSERT(!mutex_owned(SD_MUTEX(un)));
16697 16695
16698 16696 mutex_enter(SD_MUTEX(un));
16699 16697
16700 16698 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16701 16699 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16702 16700
16703 16701 un->un_reset_throttle_timeid = NULL;
16704 16702
16705 16703 if (un->un_f_use_adaptive_throttle == TRUE) {
16706 16704 /*
16707 16705 * If un_busy_throttle is nonzero, then it contains the
16708 16706 * value that un_throttle was when we got a TRAN_BUSY back
16709 16707 * from scsi_transport(). We want to revert back to this
16710 16708 * value.
16711 16709 *
16712 16710 * In the QFULL case, the throttle limit will incrementally
16713 16711 * increase until it reaches max throttle.
16714 16712 */
16715 16713 if (un->un_busy_throttle > 0) {
16716 16714 un->un_throttle = un->un_busy_throttle;
16717 16715 un->un_busy_throttle = 0;
16718 16716 } else {
16719 16717 /*
16720 16718 * increase throttle by 10% open gate slowly, schedule
16721 16719 * another restore if saved throttle has not been
16722 16720 * reached
16723 16721 */
16724 16722 short throttle;
16725 16723 if (sd_qfull_throttle_enable) {
16726 16724 throttle = un->un_throttle +
16727 16725 max((un->un_throttle / 10), 1);
16728 16726 un->un_throttle =
16729 16727 (throttle < un->un_saved_throttle) ?
16730 16728 throttle : un->un_saved_throttle;
16731 16729 if (un->un_throttle < un->un_saved_throttle) {
16732 16730 un->un_reset_throttle_timeid =
16733 16731 timeout(sd_restore_throttle,
16734 16732 un,
16735 16733 SD_QFULL_THROTTLE_RESET_INTERVAL);
16736 16734 }
16737 16735 }
16738 16736 }
16739 16737
16740 16738 /*
16741 16739 * If un_throttle has fallen below the low-water mark, we
16742 16740 * restore the maximum value here (and allow it to ratchet
16743 16741 * down again if necessary).
16744 16742 */
16745 16743 if (un->un_throttle < un->un_min_throttle) {
16746 16744 un->un_throttle = un->un_saved_throttle;
16747 16745 }
16748 16746 } else {
16749 16747 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16750 16748 "restoring limit from 0x%x to 0x%x\n",
16751 16749 un->un_throttle, un->un_saved_throttle);
16752 16750 un->un_throttle = un->un_saved_throttle;
16753 16751 }
16754 16752
16755 16753 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16756 16754 "sd_restore_throttle: calling sd_start_cmds!\n");
16757 16755
16758 16756 sd_start_cmds(un, NULL);
16759 16757
16760 16758 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16761 16759 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
16762 16760 un, un->un_throttle);
16763 16761
16764 16762 mutex_exit(SD_MUTEX(un));
16765 16763
16766 16764 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
16767 16765 }
16768 16766
16769 16767 /*
16770 16768 * Function: sdrunout
16771 16769 *
16772 16770 * Description: Callback routine for scsi_init_pkt when a resource allocation
16773 16771 * fails.
16774 16772 *
16775 16773 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
16776 16774 * soft state instance.
16777 16775 *
16778 16776 * Return Code: The scsi_init_pkt routine allows for the callback function to
16779 16777 * return a 0 indicating the callback should be rescheduled or a 1
16780 16778 * indicating not to reschedule. This routine always returns 1
16781 16779 * because the driver always provides a callback function to
16782 16780 * scsi_init_pkt. This results in a callback always being scheduled
16783 16781 * (via the scsi_init_pkt callback implementation) if a resource
16784 16782 * failure occurs.
16785 16783 *
16786 16784 * Context: This callback function may not block or call routines that block
16787 16785 *
16788 16786 * Note: Using the scsi_init_pkt callback facility can result in an I/O
16789 16787 * request persisting at the head of the list which cannot be
16790 16788 * satisfied even after multiple retries. In the future the driver
16791 16789 * may implement some time of maximum runout count before failing
16792 16790 * an I/O.
16793 16791 */
16794 16792
16795 16793 static int
16796 16794 sdrunout(caddr_t arg)
16797 16795 {
16798 16796 struct sd_lun *un = (struct sd_lun *)arg;
16799 16797
16800 16798 ASSERT(un != NULL);
16801 16799 ASSERT(!mutex_owned(SD_MUTEX(un)));
16802 16800
16803 16801 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
16804 16802
16805 16803 mutex_enter(SD_MUTEX(un));
16806 16804 sd_start_cmds(un, NULL);
16807 16805 mutex_exit(SD_MUTEX(un));
16808 16806 /*
16809 16807 * This callback routine always returns 1 (i.e. do not reschedule)
16810 16808 * because we always specify sdrunout as the callback handler for
16811 16809 * scsi_init_pkt inside the call to sd_start_cmds.
16812 16810 */
16813 16811 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16814 16812 return (1);
16815 16813 }
16816 16814
16817 16815
16818 16816 /*
16819 16817 * Function: sdintr
16820 16818 *
16821 16819 * Description: Completion callback routine for scsi_pkt(9S) structs
16822 16820 * sent to the HBA driver via scsi_transport(9F).
16823 16821 *
16824 16822 * Context: Interrupt context
16825 16823 */
16826 16824
16827 16825 static void
16828 16826 sdintr(struct scsi_pkt *pktp)
16829 16827 {
16830 16828 struct buf *bp;
16831 16829 struct sd_xbuf *xp;
16832 16830 struct sd_lun *un;
16833 16831 size_t actual_len;
16834 16832 sd_ssc_t *sscp;
16835 16833
16836 16834 ASSERT(pktp != NULL);
16837 16835 bp = (struct buf *)pktp->pkt_private;
16838 16836 ASSERT(bp != NULL);
16839 16837 xp = SD_GET_XBUF(bp);
16840 16838 ASSERT(xp != NULL);
16841 16839 ASSERT(xp->xb_pktp != NULL);
16842 16840 un = SD_GET_UN(bp);
16843 16841 ASSERT(un != NULL);
16844 16842 ASSERT(!mutex_owned(SD_MUTEX(un)));
16845 16843
16846 16844 #ifdef SD_FAULT_INJECTION
16847 16845
16848 16846 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
16849 16847 /* SD FaultInjection */
16850 16848 sd_faultinjection(pktp);
16851 16849
16852 16850 #endif /* SD_FAULT_INJECTION */
16853 16851
16854 16852 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
16855 16853 " xp:0x%p, un:0x%p\n", bp, xp, un);
16856 16854
16857 16855 mutex_enter(SD_MUTEX(un));
16858 16856
16859 16857 ASSERT(un->un_fm_private != NULL);
16860 16858 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
16861 16859 ASSERT(sscp != NULL);
16862 16860
16863 16861 /* Reduce the count of the #commands currently in transport */
16864 16862 un->un_ncmds_in_transport--;
16865 16863 ASSERT(un->un_ncmds_in_transport >= 0);
16866 16864
16867 16865 /* Increment counter to indicate that the callback routine is active */
16868 16866 un->un_in_callback++;
16869 16867
16870 16868 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
16871 16869
16872 16870 #ifdef SDDEBUG
16873 16871 if (bp == un->un_retry_bp) {
16874 16872 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
16875 16873 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
16876 16874 un, un->un_retry_bp, un->un_ncmds_in_transport);
16877 16875 }
16878 16876 #endif
16879 16877
16880 16878 /*
16881 16879 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
16882 16880 * state if needed.
16883 16881 */
16884 16882 if (pktp->pkt_reason == CMD_DEV_GONE) {
16885 16883 /* Prevent multiple console messages for the same failure. */
16886 16884 if (un->un_last_pkt_reason != CMD_DEV_GONE) {
16887 16885 un->un_last_pkt_reason = CMD_DEV_GONE;
16888 16886 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16889 16887 "Command failed to complete...Device is gone\n");
16890 16888 }
16891 16889 if (un->un_mediastate != DKIO_DEV_GONE) {
16892 16890 un->un_mediastate = DKIO_DEV_GONE;
16893 16891 cv_broadcast(&un->un_state_cv);
16894 16892 }
16895 16893 /*
16896 16894 * If the command happens to be the REQUEST SENSE command,
16897 16895 * free up the rqs buf and fail the original command.
16898 16896 */
16899 16897 if (bp == un->un_rqs_bp) {
16900 16898 bp = sd_mark_rqs_idle(un, xp);
16901 16899 }
16902 16900 sd_return_failed_command(un, bp, EIO);
16903 16901 goto exit;
16904 16902 }
16905 16903
16906 16904 if (pktp->pkt_state & STATE_XARQ_DONE) {
16907 16905 SD_TRACE(SD_LOG_COMMON, un,
16908 16906 "sdintr: extra sense data received. pkt=%p\n", pktp);
16909 16907 }
16910 16908
16911 16909 /*
16912 16910 * First see if the pkt has auto-request sense data with it....
16913 16911 * Look at the packet state first so we don't take a performance
16914 16912 * hit looking at the arq enabled flag unless absolutely necessary.
16915 16913 */
16916 16914 if ((pktp->pkt_state & STATE_ARQ_DONE) &&
16917 16915 (un->un_f_arq_enabled == TRUE)) {
16918 16916 /*
16919 16917 * The HBA did an auto request sense for this command so check
16920 16918 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
16921 16919 * driver command that should not be retried.
16922 16920 */
16923 16921 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
16924 16922 /*
16925 16923 * Save the relevant sense info into the xp for the
16926 16924 * original cmd.
16927 16925 */
16928 16926 struct scsi_arq_status *asp;
16929 16927 asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
16930 16928 xp->xb_sense_status =
16931 16929 *((uchar_t *)(&(asp->sts_rqpkt_status)));
16932 16930 xp->xb_sense_state = asp->sts_rqpkt_state;
16933 16931 xp->xb_sense_resid = asp->sts_rqpkt_resid;
16934 16932 if (pktp->pkt_state & STATE_XARQ_DONE) {
16935 16933 actual_len = MAX_SENSE_LENGTH -
16936 16934 xp->xb_sense_resid;
16937 16935 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16938 16936 MAX_SENSE_LENGTH);
16939 16937 } else {
16940 16938 if (xp->xb_sense_resid > SENSE_LENGTH) {
16941 16939 actual_len = MAX_SENSE_LENGTH -
16942 16940 xp->xb_sense_resid;
16943 16941 } else {
16944 16942 actual_len = SENSE_LENGTH -
16945 16943 xp->xb_sense_resid;
16946 16944 }
16947 16945 if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
16948 16946 if ((((struct uscsi_cmd *)
16949 16947 (xp->xb_pktinfo))->uscsi_rqlen) >
16950 16948 actual_len) {
16951 16949 xp->xb_sense_resid =
16952 16950 (((struct uscsi_cmd *)
16953 16951 (xp->xb_pktinfo))->
16954 16952 uscsi_rqlen) - actual_len;
16955 16953 } else {
16956 16954 xp->xb_sense_resid = 0;
16957 16955 }
16958 16956 }
16959 16957 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16960 16958 SENSE_LENGTH);
16961 16959 }
16962 16960
16963 16961 /* fail the command */
16964 16962 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16965 16963 "sdintr: arq done and FLAG_DIAGNOSE set\n");
16966 16964 sd_return_failed_command(un, bp, EIO);
16967 16965 goto exit;
16968 16966 }
16969 16967
16970 16968 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
16971 16969 /*
16972 16970 * We want to either retry or fail this command, so free
16973 16971 * the DMA resources here. If we retry the command then
16974 16972 * the DMA resources will be reallocated in sd_start_cmds().
16975 16973 * Note that when PKT_DMA_PARTIAL is used, this reallocation
16976 16974 * causes the *entire* transfer to start over again from the
16977 16975 * beginning of the request, even for PARTIAL chunks that
16978 16976 * have already transferred successfully.
16979 16977 */
16980 16978 if ((un->un_f_is_fibre == TRUE) &&
16981 16979 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
16982 16980 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
16983 16981 scsi_dmafree(pktp);
16984 16982 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
16985 16983 }
16986 16984 #endif
16987 16985
16988 16986 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16989 16987 "sdintr: arq done, sd_handle_auto_request_sense\n");
16990 16988
16991 16989 sd_handle_auto_request_sense(un, bp, xp, pktp);
16992 16990 goto exit;
16993 16991 }
16994 16992
16995 16993 /* Next see if this is the REQUEST SENSE pkt for the instance */
16996 16994 if (pktp->pkt_flags & FLAG_SENSING) {
16997 16995 /* This pktp is from the unit's REQUEST_SENSE command */
16998 16996 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16999 16997 "sdintr: sd_handle_request_sense\n");
17000 16998 sd_handle_request_sense(un, bp, xp, pktp);
17001 16999 goto exit;
17002 17000 }
17003 17001
17004 17002 /*
17005 17003 * Check to see if the command successfully completed as requested;
17006 17004 * this is the most common case (and also the hot performance path).
17007 17005 *
17008 17006 * Requirements for successful completion are:
17009 17007 * pkt_reason is CMD_CMPLT and packet status is status good.
17010 17008 * In addition:
17011 17009 * - A residual of zero indicates successful completion no matter what
17012 17010 * the command is.
17013 17011 * - If the residual is not zero and the command is not a read or
17014 17012 * write, then it's still defined as successful completion. In other
17015 17013 * words, if the command is a read or write the residual must be
17016 17014 * zero for successful completion.
17017 17015 * - If the residual is not zero and the command is a read or
17018 17016 * write, and it's a USCSICMD, then it's still defined as
17019 17017 * successful completion.
17020 17018 */
17021 17019 if ((pktp->pkt_reason == CMD_CMPLT) &&
17022 17020 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {
17023 17021
17024 17022 /*
17025 17023 * Since this command is returned with a good status, we
17026 17024 * can reset the count for Sonoma failover.
17027 17025 */
17028 17026 un->un_sonoma_failure_count = 0;
17029 17027
17030 17028 /*
17031 17029 * Return all USCSI commands on good status
17032 17030 */
17033 17031 if (pktp->pkt_resid == 0) {
17034 17032 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17035 17033 "sdintr: returning command for resid == 0\n");
17036 17034 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
17037 17035 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
17038 17036 SD_UPDATE_B_RESID(bp, pktp);
17039 17037 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17040 17038 "sdintr: returning command for resid != 0\n");
17041 17039 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17042 17040 SD_UPDATE_B_RESID(bp, pktp);
17043 17041 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17044 17042 "sdintr: returning uscsi command\n");
17045 17043 } else {
17046 17044 goto not_successful;
17047 17045 }
17048 17046 sd_return_command(un, bp);
17049 17047
17050 17048 /*
17051 17049 * Decrement counter to indicate that the callback routine
17052 17050 * is done.
17053 17051 */
17054 17052 un->un_in_callback--;
17055 17053 ASSERT(un->un_in_callback >= 0);
17056 17054 mutex_exit(SD_MUTEX(un));
17057 17055
17058 17056 return;
17059 17057 }
17060 17058
17061 17059 not_successful:
17062 17060
17063 17061 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
17064 17062 /*
17065 17063 * The following is based upon knowledge of the underlying transport
17066 17064 * and its use of DMA resources. This code should be removed when
17067 17065 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
17068 17066 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
17069 17067 * and sd_start_cmds().
17070 17068 *
17071 17069 * Free any DMA resources associated with this command if there
17072 17070 * is a chance it could be retried or enqueued for later retry.
17073 17071 * If we keep the DMA binding then mpxio cannot reissue the
17074 17072 * command on another path whenever a path failure occurs.
17075 17073 *
17076 17074 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
17077 17075 * causes the *entire* transfer to start over again from the
17078 17076 * beginning of the request, even for PARTIAL chunks that
17079 17077 * have already transferred successfully.
17080 17078 *
17081 17079 * This is only done for non-uscsi commands (and also skipped for the
17082 17080 * driver's internal RQS command). Also just do this for Fibre Channel
17083 17081 * devices as these are the only ones that support mpxio.
17084 17082 */
17085 17083 if ((un->un_f_is_fibre == TRUE) &&
17086 17084 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
17087 17085 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
17088 17086 scsi_dmafree(pktp);
17089 17087 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
17090 17088 }
17091 17089 #endif
17092 17090
17093 17091 /*
17094 17092 * The command did not successfully complete as requested so check
17095 17093 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
17096 17094 * driver command that should not be retried so just return. If
17097 17095 * FLAG_DIAGNOSE is not set the error will be processed below.
17098 17096 */
17099 17097 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
17100 17098 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17101 17099 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
17102 17100 /*
17103 17101 * Issue a request sense if a check condition caused the error
17104 17102 * (we handle the auto request sense case above), otherwise
17105 17103 * just fail the command.
17106 17104 */
17107 17105 if ((pktp->pkt_reason == CMD_CMPLT) &&
17108 17106 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
17109 17107 sd_send_request_sense_command(un, bp, pktp);
17110 17108 } else {
17111 17109 sd_return_failed_command(un, bp, EIO);
17112 17110 }
17113 17111 goto exit;
17114 17112 }
17115 17113
17116 17114 /*
17117 17115 * The command did not successfully complete as requested so process
17118 17116 * the error, retry, and/or attempt recovery.
17119 17117 */
17120 17118 switch (pktp->pkt_reason) {
17121 17119 case CMD_CMPLT:
17122 17120 switch (SD_GET_PKT_STATUS(pktp)) {
17123 17121 case STATUS_GOOD:
17124 17122 /*
17125 17123 * The command completed successfully with a non-zero
17126 17124 * residual
17127 17125 */
17128 17126 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17129 17127 "sdintr: STATUS_GOOD \n");
17130 17128 sd_pkt_status_good(un, bp, xp, pktp);
17131 17129 break;
17132 17130
17133 17131 case STATUS_CHECK:
17134 17132 case STATUS_TERMINATED:
17135 17133 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17136 17134 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
17137 17135 sd_pkt_status_check_condition(un, bp, xp, pktp);
17138 17136 break;
17139 17137
17140 17138 case STATUS_BUSY:
17141 17139 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17142 17140 "sdintr: STATUS_BUSY\n");
17143 17141 sd_pkt_status_busy(un, bp, xp, pktp);
17144 17142 break;
17145 17143
17146 17144 case STATUS_RESERVATION_CONFLICT:
17147 17145 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17148 17146 "sdintr: STATUS_RESERVATION_CONFLICT\n");
17149 17147 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17150 17148 break;
17151 17149
17152 17150 case STATUS_QFULL:
17153 17151 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17154 17152 "sdintr: STATUS_QFULL\n");
17155 17153 sd_pkt_status_qfull(un, bp, xp, pktp);
17156 17154 break;
17157 17155
17158 17156 case STATUS_MET:
17159 17157 case STATUS_INTERMEDIATE:
17160 17158 case STATUS_SCSI2:
17161 17159 case STATUS_INTERMEDIATE_MET:
17162 17160 case STATUS_ACA_ACTIVE:
17163 17161 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17164 17162 "Unexpected SCSI status received: 0x%x\n",
17165 17163 SD_GET_PKT_STATUS(pktp));
17166 17164 /*
17167 17165 * Mark the ssc_flags when detected invalid status
17168 17166 * code for non-USCSI command.
17169 17167 */
17170 17168 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17171 17169 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17172 17170 0, "stat-code");
17173 17171 }
17174 17172 sd_return_failed_command(un, bp, EIO);
17175 17173 break;
17176 17174
17177 17175 default:
17178 17176 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17179 17177 "Invalid SCSI status received: 0x%x\n",
17180 17178 SD_GET_PKT_STATUS(pktp));
17181 17179 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17182 17180 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17183 17181 0, "stat-code");
17184 17182 }
17185 17183 sd_return_failed_command(un, bp, EIO);
17186 17184 break;
17187 17185
17188 17186 }
17189 17187 break;
17190 17188
17191 17189 case CMD_INCOMPLETE:
17192 17190 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17193 17191 "sdintr: CMD_INCOMPLETE\n");
17194 17192 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
17195 17193 break;
17196 17194 case CMD_TRAN_ERR:
17197 17195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17198 17196 "sdintr: CMD_TRAN_ERR\n");
17199 17197 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
17200 17198 break;
17201 17199 case CMD_RESET:
17202 17200 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17203 17201 "sdintr: CMD_RESET \n");
17204 17202 sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
17205 17203 break;
17206 17204 case CMD_ABORTED:
17207 17205 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17208 17206 "sdintr: CMD_ABORTED \n");
17209 17207 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
17210 17208 break;
17211 17209 case CMD_TIMEOUT:
17212 17210 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17213 17211 "sdintr: CMD_TIMEOUT\n");
17214 17212 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
17215 17213 break;
17216 17214 case CMD_UNX_BUS_FREE:
17217 17215 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17218 17216 "sdintr: CMD_UNX_BUS_FREE \n");
17219 17217 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
17220 17218 break;
17221 17219 case CMD_TAG_REJECT:
17222 17220 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17223 17221 "sdintr: CMD_TAG_REJECT\n");
17224 17222 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
17225 17223 break;
17226 17224 default:
17227 17225 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17228 17226 "sdintr: default\n");
17229 17227 /*
17230 17228 * Mark the ssc_flags for detecting invliad pkt_reason.
17231 17229 */
17232 17230 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17233 17231 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
17234 17232 0, "pkt-reason");
17235 17233 }
17236 17234 sd_pkt_reason_default(un, bp, xp, pktp);
17237 17235 break;
17238 17236 }
17239 17237
17240 17238 exit:
17241 17239 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
17242 17240
17243 17241 /* Decrement counter to indicate that the callback routine is done. */
17244 17242 un->un_in_callback--;
17245 17243 ASSERT(un->un_in_callback >= 0);
17246 17244
17247 17245 /*
17248 17246 * At this point, the pkt has been dispatched, ie, it is either
17249 17247 * being re-tried or has been returned to its caller and should
17250 17248 * not be referenced.
17251 17249 */
17252 17250
17253 17251 mutex_exit(SD_MUTEX(un));
17254 17252 }
17255 17253
17256 17254
17257 17255 /*
17258 17256 * Function: sd_print_incomplete_msg
17259 17257 *
17260 17258 * Description: Prints the error message for a CMD_INCOMPLETE error.
17261 17259 *
17262 17260 * Arguments: un - ptr to associated softstate for the device.
17263 17261 * bp - ptr to the buf(9S) for the command.
17264 17262 * arg - message string ptr
17265 17263 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
17266 17264 * or SD_NO_RETRY_ISSUED.
17267 17265 *
17268 17266 * Context: May be called under interrupt context
17269 17267 */
17270 17268
17271 17269 static void
17272 17270 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17273 17271 {
17274 17272 struct scsi_pkt *pktp;
17275 17273 char *msgp;
17276 17274 char *cmdp = arg;
17277 17275
17278 17276 ASSERT(un != NULL);
17279 17277 ASSERT(mutex_owned(SD_MUTEX(un)));
17280 17278 ASSERT(bp != NULL);
17281 17279 ASSERT(arg != NULL);
17282 17280 pktp = SD_GET_PKTP(bp);
17283 17281 ASSERT(pktp != NULL);
17284 17282
17285 17283 switch (code) {
17286 17284 case SD_DELAYED_RETRY_ISSUED:
17287 17285 case SD_IMMEDIATE_RETRY_ISSUED:
17288 17286 msgp = "retrying";
17289 17287 break;
17290 17288 case SD_NO_RETRY_ISSUED:
17291 17289 default:
17292 17290 msgp = "giving up";
17293 17291 break;
17294 17292 }
17295 17293
17296 17294 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17297 17295 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17298 17296 "incomplete %s- %s\n", cmdp, msgp);
17299 17297 }
17300 17298 }
17301 17299
17302 17300
17303 17301
17304 17302 /*
17305 17303 * Function: sd_pkt_status_good
17306 17304 *
17307 17305 * Description: Processing for a STATUS_GOOD code in pkt_status.
17308 17306 *
17309 17307 * Context: May be called under interrupt context
17310 17308 */
17311 17309
17312 17310 static void
17313 17311 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
17314 17312 struct sd_xbuf *xp, struct scsi_pkt *pktp)
17315 17313 {
17316 17314 char *cmdp;
17317 17315
17318 17316 ASSERT(un != NULL);
17319 17317 ASSERT(mutex_owned(SD_MUTEX(un)));
17320 17318 ASSERT(bp != NULL);
17321 17319 ASSERT(xp != NULL);
17322 17320 ASSERT(pktp != NULL);
17323 17321 ASSERT(pktp->pkt_reason == CMD_CMPLT);
17324 17322 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
17325 17323 ASSERT(pktp->pkt_resid != 0);
17326 17324
17327 17325 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
17328 17326
17329 17327 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17330 17328 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
17331 17329 case SCMD_READ:
17332 17330 cmdp = "read";
17333 17331 break;
17334 17332 case SCMD_WRITE:
17335 17333 cmdp = "write";
17336 17334 break;
17337 17335 default:
17338 17336 SD_UPDATE_B_RESID(bp, pktp);
17339 17337 sd_return_command(un, bp);
17340 17338 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17341 17339 return;
17342 17340 }
17343 17341
17344 17342 /*
17345 17343 * See if we can retry the read/write, preferrably immediately.
17346 17344 * If retries are exhaused, then sd_retry_command() will update
17347 17345 * the b_resid count.
17348 17346 */
17349 17347 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
17350 17348 cmdp, EIO, (clock_t)0, NULL);
17351 17349
17352 17350 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17353 17351 }
17354 17352
17355 17353
17356 17354
17357 17355
17358 17356
/*
 * Function: sd_handle_request_sense
 *
 * Description: Processing for non-auto Request Sense command.  Copies the
 *		sense data returned by the driver's own REQUEST SENSE pkt
 *		into the original command's xbuf, releases the RQS pkt for
 *		re-use, and then either decodes the sense data or fails/
 *		retries the original command.
 *
 * Arguments: un - ptr to associated softstate
 *		sense_bp - ptr to buf(9S) for the RQS command
 *		sense_xp - ptr to the sd_xbuf for the RQS command
 *		sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
	struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
	struct buf	*cmd_bp;	/* buf for the original command */
	struct sd_xbuf	*cmd_xp;	/* sd_xbuf for the original command */
	struct scsi_pkt	*cmd_pktp;	/* pkt for the original command */
	size_t		actual_len;	/* actual sense data length */

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(sense_bp != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(sense_pktp != NULL);

	/*
	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
	 * RQS command and not the original command.
	 */
	ASSERT(sense_pktp == un->un_rqs_pktp);
	ASSERT(sense_bp == un->un_rqs_bp);
	ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
	    (FLAG_SENSING | FLAG_HEAD));
	/* The original command's pkt must still be marked as "sensing". */
	ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
	    FLAG_SENSING) == FLAG_SENSING);

	/* These are the bp, xp, and pktp for the original command */
	cmd_bp = sense_xp->xb_sense_bp;
	cmd_xp = SD_GET_XBUF(cmd_bp);
	cmd_pktp = SD_GET_PKTP(cmd_bp);

	if (sense_pktp->pkt_reason != CMD_CMPLT) {
		/*
		 * The REQUEST SENSE command failed.  Release the REQUEST
		 * SENSE command for re-use, get back the bp for the original
		 * command, and attempt to re-try the original command if
		 * FLAG_DIAGNOSE is not set in the original packet.
		 */
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			cmd_bp = sd_mark_rqs_idle(un, sense_xp);
			sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
			    NULL, NULL, EIO, (clock_t)0, NULL);
			return;
		}
		/*
		 * FLAG_DIAGNOSE set: fall through; the sense-save below
		 * will see the zeroed state from sd_mark_rqs_busy() and
		 * the command is failed at the bottom of this function.
		 */
	}

	/*
	 * Save the relevant sense info into the xp for the original cmd.
	 *
	 * Note: if the request sense failed the state info will be zero
	 * as set in sd_mark_rqs_busy()
	 */
	cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
	cmd_xp->xb_sense_state = sense_pktp->pkt_state;
	/* pkt_resid is the untransferred part of a MAX_SENSE_LENGTH xfer. */
	actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
	if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
	    (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
	    SENSE_LENGTH)) {
		/*
		 * uscsi caller asked for more than SENSE_LENGTH bytes of
		 * sense: hand over the full buffer and the raw residual.
		 */
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    MAX_SENSE_LENGTH);
		cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
	} else {
		/* Normal case: keep only the first SENSE_LENGTH bytes. */
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    SENSE_LENGTH);
		if (actual_len < SENSE_LENGTH) {
			cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
		} else {
			cmd_xp->xb_sense_resid = 0;
		}
	}

	/*
	 * Free up the RQS command....
	 * NOTE:
	 * Must do this BEFORE calling sd_validate_sense_data!
	 * sd_validate_sense_data may return the original command in
	 * which case the pkt will be freed and the flags can no
	 * longer be touched.
	 * SD_MUTEX is held through this process until the command
	 * is dispatched based upon the sense data, so there are
	 * no race conditions.
	 */
	(void) sd_mark_rqs_idle(un, sense_xp);

	/*
	 * For a retryable command see if we have valid sense data, if so then
	 * turn it over to sd_decode_sense() to figure out the right course of
	 * action. Just fail a non-retryable command.
	 */
	if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
		if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
		    SD_SENSE_DATA_IS_VALID) {
			sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
		}
	} else {
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
		    (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
		    (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		sd_return_failed_command(un, cmd_bp, EIO);
	}
}
17475 17473
17476 17474
17477 17475
17478 17476
/*
 * Function: sd_handle_auto_request_sense
 *
 * Description: Processing for auto-request sense information.  Extracts
 *		the scsi_arq_status supplied by the HBA, saves the sense
 *		status/state/residual into the command's xbuf, and passes
 *		valid sense data on to sd_decode_sense().  (The caller,
 *		sdintr(), has already handled the FLAG_DIAGNOSE case.)
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		xp - ptr to the sd_xbuf for the command
 *		pktp - ptr to the scsi_pkt(9S) for the command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct scsi_arq_status *asp;
	size_t actual_len;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	/* Must not be the driver's own manual REQUEST SENSE pkt/buf. */
	ASSERT(pktp != un->un_rqs_pktp);
	ASSERT(bp != un->un_rqs_bp);

	/*
	 * For auto-request sense, we get a scsi_arq_status back from
	 * the HBA, with the sense data in the sts_sensedata member.
	 * The pkt_scbp of the packet points to this scsi_arq_status.
	 */
	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);

	if (asp->sts_rqpkt_reason != CMD_CMPLT) {
		/*
		 * The auto REQUEST SENSE failed; see if we can re-try
		 * the original command.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "auto request sense failed (reason=%s)\n",
		    scsi_rname(asp->sts_rqpkt_reason));

		sd_reset_target(un, pktp);

		sd_retry_command(un, bp, SD_RETRIES_STANDARD,
		    NULL, NULL, EIO, (clock_t)0, NULL);
		return;
	}

	/* Save the relevant sense info into the xp for the original cmd. */
	xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
	xp->xb_sense_state = asp->sts_rqpkt_state;
	xp->xb_sense_resid = asp->sts_rqpkt_resid;
	if (xp->xb_sense_state & STATE_XARQ_DONE) {
		/* Extra (extended) ARQ: full MAX_SENSE_LENGTH buffer. */
		actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		bcopy(&asp->sts_sensedata, xp->xb_sense_data,
		    MAX_SENSE_LENGTH);
	} else {
		/*
		 * Normal ARQ: a resid larger than SENSE_LENGTH implies the
		 * HBA reported the residual against MAX_SENSE_LENGTH.
		 */
		if (xp->xb_sense_resid > SENSE_LENGTH) {
			actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			actual_len = SENSE_LENGTH - xp->xb_sense_resid;
		}
		if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			/*
			 * Recompute the resid relative to what the uscsi
			 * caller actually asked for (uscsi_rqlen).
			 */
			if ((((struct uscsi_cmd *)
			    (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
				xp->xb_sense_resid = (((struct uscsi_cmd *)
				    (xp->xb_pktinfo))->uscsi_rqlen) -
				    actual_len;
			} else {
				xp->xb_sense_resid = 0;
			}
		}
		bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
	}

	/*
	 * See if we have valid sense data, if so then turn it over to
	 * sd_decode_sense() to figure out the right course of action.
	 */
	if (sd_validate_sense_data(un, bp, xp, actual_len) ==
	    SD_SENSE_DATA_IS_VALID) {
		sd_decode_sense(un, bp, xp, pktp);
	}
}
17566 17564
17567 17565
17568 17566 /*
17569 17567 * Function: sd_print_sense_failed_msg
17570 17568 *
17571 17569 * Description: Print log message when RQS has failed.
17572 17570 *
17573 17571 * Arguments: un - ptr to associated softstate
17574 17572 * bp - ptr to buf(9S) for the command
17575 17573 * arg - generic message string ptr
17576 17574 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17577 17575 * or SD_NO_RETRY_ISSUED
17578 17576 *
17579 17577 * Context: May be called from interrupt context
17580 17578 */
17581 17579
17582 17580 static void
17583 17581 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
17584 17582 int code)
17585 17583 {
17586 17584 char *msgp = arg;
17587 17585
17588 17586 ASSERT(un != NULL);
17589 17587 ASSERT(mutex_owned(SD_MUTEX(un)));
17590 17588 ASSERT(bp != NULL);
17591 17589
17592 17590 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
17593 17591 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
17594 17592 }
17595 17593 }
17596 17594
17597 17595
/*
 * Function: sd_validate_sense_data
 *
 * Description: Check the given sense data for validity.
 *		If the sense data is not valid, the command will
 *		be either failed or retried!
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the original command
 *		xp - ptr to the sd_xbuf holding the saved sense data
 *		actual_len - number of sense bytes actually transferred
 *
 * Return Code: SD_SENSE_DATA_IS_INVALID
 *		SD_SENSE_DATA_IS_VALID
 *
 * Context: May be called from interrupt context
 */

static int
sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	size_t actual_len)
{
	struct scsi_extended_sense *esp;
	struct scsi_pkt *pktp;
	char *msgp = NULL;
	sd_ssc_t *sscp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(un->un_fm_private != NULL);

	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
	ASSERT(sscp != NULL);

	/*
	 * Check the status of the RQS command (auto or manual).
	 */
	switch (xp->xb_sense_status & STATUS_MASK) {
	case STATUS_GOOD:
		break;

	case STATUS_RESERVATION_CONFLICT:
		sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_BUSY:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Busy Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_QFULL:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "QFULL Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);

	case STATUS_CHECK:
	case STATUS_TERMINATED:
		msgp = "Check Condition on REQUEST SENSE\n";
		goto sense_failed;

	default:
		msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
		goto sense_failed;
	}

	/*
	 * See if we got the minimum required amount of sense data.
	 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
	 * or less.
	 */
	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
	    (actual_len == 0)) {
		msgp = "Request Sense couldn't get sense data\n";
		goto sense_failed;
	}

	if (actual_len < SUN_MIN_SENSE_LENGTH) {
		msgp = "Not enough sense information\n";
		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}
		goto sense_failed;
	}

	/*
	 * We require the extended sense data
	 */
	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
	if (esp->es_class != CLASS_EXTENDED_SENSE) {
		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
			/*
			 * Static scratch buffers; serialized by
			 * sd_sense_mutex since multiple instances can
			 * reach this path concurrently.
			 *
			 * NOTE(review): buf[148] only holds the full dump
			 * under the SENSE_LENGTH-or-less assumption noted
			 * above (30 + 20*5 + 17 + NUL = 148); confirm
			 * actual_len cannot exceed SENSE_LENGTH on this
			 * path (e.g. via an XARQ transfer).
			 */
			static char tmp[8];
			static char buf[148];
			char *p = (char *)(xp->xb_sense_data);
			int i;

			mutex_enter(&sd_sense_mutex);
			(void) strcpy(buf, "undecodable sense information:");
			for (i = 0; i < actual_len; i++) {
				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
				(void) strcpy(&buf[strlen(buf)], tmp);
			}
			i = strlen(buf);
			(void) strcpy(&buf[i], "-(assumed fatal)\n");

			if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, buf);
			}
			mutex_exit(&sd_sense_mutex);
		}

		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}

		/* Note: Legacy behavior, fail the command with no retry */
		sd_return_failed_command(un, bp, EIO);
		return (SD_SENSE_DATA_IS_INVALID);
	}

	/*
	 * Check that es_code is valid (es_class concatenated with es_code
	 * make up the "response code" field. es_class will always be 7, so
	 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the
	 * format.
	 */
	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
		/* Mark the ssc_flags for detecting invalid sense data */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}
		goto sense_failed;
	}

	return (SD_SENSE_DATA_IS_VALID);

sense_failed:
	/*
	 * If the request sense failed (for whatever reason), attempt
	 * to retry the original command.
	 */
#if defined(__i386) || defined(__amd64)
	/*
	 * SD_RETRY_DELAY is conditionally compile (#if fibre) in
	 * sddef.h for Sparc platform, and x86 uses 1 binary
	 * for both SCSI/FC.
	 * The SD_RETRY_DELAY value need to be adjusted here
	 * when SD_RETRY_DELAY change in sddef.h
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO,
	    un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL);
#else
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
#endif

	return (SD_SENSE_DATA_IS_INVALID);
}
17771 17769
17772 17770 /*
17773 17771 * Function: sd_decode_sense
17774 17772 *
17775 17773 * Description: Take recovery action(s) when SCSI Sense Data is received.
17776 17774 *
17777 17775 * Context: Interrupt context.
17778 17776 */
17779 17777
17780 17778 static void
17781 17779 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17782 17780 struct scsi_pkt *pktp)
17783 17781 {
17784 17782 uint8_t sense_key;
17785 17783
17786 17784 ASSERT(un != NULL);
17787 17785 ASSERT(mutex_owned(SD_MUTEX(un)));
17788 17786 ASSERT(bp != NULL);
17789 17787 ASSERT(bp != un->un_rqs_bp);
17790 17788 ASSERT(xp != NULL);
17791 17789 ASSERT(pktp != NULL);
17792 17790
17793 17791 sense_key = scsi_sense_key(xp->xb_sense_data);
17794 17792
17795 17793 switch (sense_key) {
17796 17794 case KEY_NO_SENSE:
17797 17795 sd_sense_key_no_sense(un, bp, xp, pktp);
17798 17796 break;
17799 17797 case KEY_RECOVERABLE_ERROR:
17800 17798 sd_sense_key_recoverable_error(un, xp->xb_sense_data,
17801 17799 bp, xp, pktp);
17802 17800 break;
17803 17801 case KEY_NOT_READY:
17804 17802 sd_sense_key_not_ready(un, xp->xb_sense_data,
17805 17803 bp, xp, pktp);
17806 17804 break;
17807 17805 case KEY_MEDIUM_ERROR:
17808 17806 case KEY_HARDWARE_ERROR:
17809 17807 sd_sense_key_medium_or_hardware_error(un,
17810 17808 xp->xb_sense_data, bp, xp, pktp);
17811 17809 break;
17812 17810 case KEY_ILLEGAL_REQUEST:
17813 17811 sd_sense_key_illegal_request(un, bp, xp, pktp);
17814 17812 break;
17815 17813 case KEY_UNIT_ATTENTION:
17816 17814 sd_sense_key_unit_attention(un, xp->xb_sense_data,
17817 17815 bp, xp, pktp);
17818 17816 break;
17819 17817 case KEY_WRITE_PROTECT:
17820 17818 case KEY_VOLUME_OVERFLOW:
17821 17819 case KEY_MISCOMPARE:
17822 17820 sd_sense_key_fail_command(un, bp, xp, pktp);
17823 17821 break;
17824 17822 case KEY_BLANK_CHECK:
17825 17823 sd_sense_key_blank_check(un, bp, xp, pktp);
17826 17824 break;
17827 17825 case KEY_ABORTED_COMMAND:
17828 17826 sd_sense_key_aborted_command(un, bp, xp, pktp);
17829 17827 break;
17830 17828 case KEY_VENDOR_UNIQUE:
17831 17829 case KEY_COPY_ABORTED:
17832 17830 case KEY_EQUAL:
17833 17831 case KEY_RESERVED:
17834 17832 default:
17835 17833 sd_sense_key_default(un, xp->xb_sense_data,
17836 17834 bp, xp, pktp);
17837 17835 break;
17838 17836 }
17839 17837 }
17840 17838
17841 17839
17842 17840 /*
17843 17841 * Function: sd_dump_memory
17844 17842 *
17845 17843 * Description: Debug logging routine to print the contents of a user provided
17846 17844 * buffer. The output of the buffer is broken up into 256 byte
17847 17845 * segments due to a size constraint of the scsi_log.
17848 17846 * implementation.
17849 17847 *
17850 17848 * Arguments: un - ptr to softstate
17851 17849 * comp - component mask
17852 17850 * title - "title" string to preceed data when printed
17853 17851 * data - ptr to data block to be printed
17854 17852 * len - size of data block to be printed
17855 17853 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
17856 17854 *
17857 17855 * Context: May be called from interrupt context
17858 17856 */
17859 17857
17860 17858 #define SD_DUMP_MEMORY_BUF_SIZE 256
17861 17859
17862 17860 static char *sd_dump_format_string[] = {
17863 17861 " 0x%02x",
17864 17862 " %c"
17865 17863 };
17866 17864
17867 17865 static void
17868 17866 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
17869 17867 int len, int fmt)
17870 17868 {
17871 17869 int i, j;
17872 17870 int avail_count;
17873 17871 int start_offset;
17874 17872 int end_offset;
17875 17873 size_t entry_len;
17876 17874 char *bufp;
17877 17875 char *local_buf;
17878 17876 char *format_string;
17879 17877
17880 17878 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
17881 17879
17882 17880 /*
17883 17881 * In the debug version of the driver, this function is called from a
17884 17882 * number of places which are NOPs in the release driver.
17885 17883 * The debug driver therefore has additional methods of filtering
17886 17884 * debug output.
17887 17885 */
17888 17886 #ifdef SDDEBUG
17889 17887 /*
17890 17888 * In the debug version of the driver we can reduce the amount of debug
17891 17889 * messages by setting sd_error_level to something other than
17892 17890 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
17893 17891 * sd_component_mask.
17894 17892 */
17895 17893 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
17896 17894 (sd_error_level != SCSI_ERR_ALL)) {
17897 17895 return;
17898 17896 }
17899 17897 if (((sd_component_mask & comp) == 0) ||
17900 17898 (sd_error_level != SCSI_ERR_ALL)) {
17901 17899 return;
17902 17900 }
17903 17901 #else
17904 17902 if (sd_error_level != SCSI_ERR_ALL) {
17905 17903 return;
17906 17904 }
17907 17905 #endif
17908 17906
17909 17907 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
17910 17908 bufp = local_buf;
17911 17909 /*
17912 17910 * Available length is the length of local_buf[], minus the
17913 17911 * length of the title string, minus one for the ":", minus
17914 17912 * one for the newline, minus one for the NULL terminator.
17915 17913 * This gives the #bytes available for holding the printed
17916 17914 * values from the given data buffer.
17917 17915 */
17918 17916 if (fmt == SD_LOG_HEX) {
17919 17917 format_string = sd_dump_format_string[0];
17920 17918 } else /* SD_LOG_CHAR */ {
17921 17919 format_string = sd_dump_format_string[1];
17922 17920 }
17923 17921 /*
17924 17922 * Available count is the number of elements from the given
17925 17923 * data buffer that we can fit into the available length.
17926 17924 * This is based upon the size of the format string used.
17927 17925 * Make one entry and find it's size.
17928 17926 */
17929 17927 (void) sprintf(bufp, format_string, data[0]);
17930 17928 entry_len = strlen(bufp);
17931 17929 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
17932 17930
17933 17931 j = 0;
17934 17932 while (j < len) {
17935 17933 bufp = local_buf;
17936 17934 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
17937 17935 start_offset = j;
17938 17936
17939 17937 end_offset = start_offset + avail_count;
17940 17938
17941 17939 (void) sprintf(bufp, "%s:", title);
17942 17940 bufp += strlen(bufp);
17943 17941 for (i = start_offset; ((i < end_offset) && (j < len));
17944 17942 i++, j++) {
17945 17943 (void) sprintf(bufp, format_string, data[i]);
17946 17944 bufp += entry_len;
17947 17945 }
17948 17946 (void) sprintf(bufp, "\n");
17949 17947
17950 17948 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
17951 17949 }
17952 17950 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
17953 17951 }
17954 17952
/*
 * Function: sd_print_sense_msg
 *
 * Description: Log a message based upon the given sense data.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - ptr to associated sd_sense_info struct; only the
 *		ssi_severity and ssi_pfa_flag members are read here
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	uint8_t *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	/* A retry was issued, so downgrade the severity to "retryable". */
	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	/* Use absolute block number for the request block number */
	request_blkno = xp->xb_blkno;

	/*
	 * Now try to get the error block number from the sense data.
	 * NOTE(review): the cast assumes diskaddr_t is a 64-bit type
	 * compatible with uint64_t — confirm against sys/types.h.
	 */
	sensep = xp->xb_sense_data;

	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
	    (uint64_t *)&err_blkno)) {
		/*
		 * We retrieved the error block number from the information
		 * portion of the sense data.
		 *
		 * For USCSI commands we are better off using the error
		 * block no. as the requested block no. (This is the best
		 * we can estimate.)
		 */
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	} else {
		/*
		 * Without the es_valid bit set (for fixed format) or an
		 * information descriptor (for descriptor format) we cannot
		 * be certain of the error blkno, so just use the
		 * request_blkno.
		 */
		err_blkno = (diskaddr_t)request_blkno;
	}

	/*
	 * The following will log the buffer contents for the release driver
	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
	 * level is set to verbose.
	 */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	/*
	 * Unless this is a predictive-failure report, suppress the message
	 * for silent packets and for ordinary buf(9S) I/O whose severity
	 * is below the configured error level (when diag logging is off).
	 */
	if (pfa_flag == FALSE) {
		/* This is normally only set for USCSI */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}
	/*
	 * Check for Sonoma Failover and keep a count of how many failed I/O's.
	 * Only the first such failure is logged; subsequent ones return here.
	 */
	if ((SD_IS_LSI(un)) &&
	    (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
	    (scsi_sense_asc(sensep) == 0x94) &&
	    (scsi_sense_ascq(sensep) == 0x01)) {
		un->un_sonoma_failure_count++;
		if (un->un_sonoma_failure_count > 1) {
			return;
		}
	}

	/*
	 * Emit the message via scsi_vu_errmsg() only when FM logging is
	 * unsupported for this device, or for a recovered error that
	 * completed with no residual.
	 */
	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
	    ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
	    (pktp->pkt_resid == 0))) {
		scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
		    request_blkno, err_blkno, scsi_cmds,
		    (struct scsi_extended_sense *)sensep,
		    un->un_additional_codes, NULL);
	}
}
18074 18072
18075 18073 /*
18076 18074 * Function: sd_sense_key_no_sense
18077 18075 *
18078 18076 * Description: Recovery action when sense data was not received.
18079 18077 *
18080 18078 * Context: May be called from interrupt context
18081 18079 */
18082 18080
18083 18081 static void
18084 18082 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
18085 18083 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18086 18084 {
18087 18085 struct sd_sense_info si;
18088 18086
18089 18087 ASSERT(un != NULL);
18090 18088 ASSERT(mutex_owned(SD_MUTEX(un)));
18091 18089 ASSERT(bp != NULL);
18092 18090 ASSERT(xp != NULL);
18093 18091 ASSERT(pktp != NULL);
18094 18092
18095 18093 si.ssi_severity = SCSI_ERR_FATAL;
18096 18094 si.ssi_pfa_flag = FALSE;
18097 18095
18098 18096 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18099 18097
18100 18098 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18101 18099 &si, EIO, (clock_t)0, NULL);
18102 18100 }
18103 18101
18104 18102
18105 18103 /*
18106 18104 * Function: sd_sense_key_recoverable_error
18107 18105 *
18108 18106 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
18109 18107 *
18110 18108 * Context: May be called from interrupt context
18111 18109 */
18112 18110
18113 18111 static void
18114 18112 sd_sense_key_recoverable_error(struct sd_lun *un,
18115 18113 uint8_t *sense_datap,
18116 18114 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18117 18115 {
18118 18116 struct sd_sense_info si;
18119 18117 uint8_t asc = scsi_sense_asc(sense_datap);
18120 18118 uint8_t ascq = scsi_sense_ascq(sense_datap);
18121 18119
18122 18120 ASSERT(un != NULL);
18123 18121 ASSERT(mutex_owned(SD_MUTEX(un)));
18124 18122 ASSERT(bp != NULL);
18125 18123 ASSERT(xp != NULL);
18126 18124 ASSERT(pktp != NULL);
18127 18125
18128 18126 /*
18129 18127 * 0x00, 0x1D: ATA PASSTHROUGH INFORMATION AVAILABLE
18130 18128 */
18131 18129 if (asc == 0x00 && ascq == 0x1D) {
18132 18130 sd_return_command(un, bp);
18133 18131 return;
18134 18132 }
18135 18133
18136 18134 /*
18137 18135 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
18138 18136 */
18139 18137 if ((asc == 0x5D) && (sd_report_pfa != 0)) {
18140 18138 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18141 18139 si.ssi_severity = SCSI_ERR_INFO;
18142 18140 si.ssi_pfa_flag = TRUE;
18143 18141 } else {
18144 18142 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18145 18143 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
18146 18144 si.ssi_severity = SCSI_ERR_RECOVERED;
18147 18145 si.ssi_pfa_flag = FALSE;
18148 18146 }
18149 18147
18150 18148 if (pktp->pkt_resid == 0) {
18151 18149 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18152 18150 sd_return_command(un, bp);
18153 18151 return;
18154 18152 }
18155 18153
18156 18154 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18157 18155 &si, EIO, (clock_t)0, NULL);
18158 18156 }
18159 18157
18160 18158
18161 18159
18162 18160
/*
 * Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *		Depending on the ASC/ASCQ, the command may be retried
 *		(optionally after scheduling a START STOP UNIT to spin the
 *		device up) or failed outright.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* Default disposition: fatal, not a predictive-failure report. */
	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted. For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_nr_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries. If a CD-ROM is giving this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken in account (for
		 * some media the drive takes a long time to read TOC). For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_nr_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value. The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:  /* LUN NOT READY, INITITIALIZING CMD REQUIRED */
			break;

		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a mutipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall thru to the default recovery
			 * action.
			 * T3/T4 is FC only, don't need to check is_fibre
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:    /* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode. Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command. The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * uncesessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/*
		 * Do not schedule the timeout if one is already pending.
		 */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule the START STOP UNIT command, then queue the command
		 * for a retry.
		 *
		 * Note: A timeout is not scheduled for this retry because we
		 * want the retry to be serial with the START_STOP_UNIT. The
		 * retry will be started when the START_STOP_UNIT is completed
		 * in sd_start_stop_unit_task.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, un->un_busy_timeout / 2);
		xp->xb_nr_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:	/* MEDIUM NOT PRESENT */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}

		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		/* The state has changed, inform the media watch routines */
		cv_broadcast(&un->un_state_cv);
		/* Just fail if no media is present in the drive. */
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:

	/*
	 * Retry the command, as some targets may report NOT READY for
	 * several seconds after being reset.
	 */
	xp->xb_nr_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, un->un_busy_timeout, NULL);

	return;

fail_command:
	/* Log the failure (per si severity) and fail the command with EIO. */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}
18430 18428
18431 18429
18432 18430
/*
 * Function: sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *		sense key. May issue a LUN or target reset once the retry
 *		count reaches the configured reset threshold, then retries
 *		the command.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t sense_key = scsi_sense_key(sense_datap);
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	if (sense_key == KEY_MEDIUM_ERROR) {
		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
	}

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * Once this command's retry count hits the reset threshold,
	 * attempt a device reset. The softstate mutex is dropped across
	 * the reset attempt and re-acquired afterwards.
	 */
	if ((un->un_reset_retry_count != 0) &&
	    (xp->xb_retry_count == un->un_reset_retry_count)) {
		mutex_exit(SD_MUTEX(un));
		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
		if (un->un_f_allow_bus_device_reset == TRUE) {

			boolean_t try_resetting_target = B_TRUE;

			/*
			 * We need to be able to handle specific ASC when we are
			 * handling a KEY_HARDWARE_ERROR. In particular
			 * taking the default action of resetting the target may
			 * not be the appropriate way to attempt recovery.
			 * Resetting a target because of a single LUN failure
			 * victimizes all LUNs on that target.
			 *
			 * This is true for the LSI arrays, if an LSI
			 * array controller returns an ASC of 0x84 (LUN Dead) we
			 * should trust it.
			 */

			if (sense_key == KEY_HARDWARE_ERROR) {
				switch (asc) {
				case 0x84:	/* LUN Dead (LSI arrays) */
					if (SD_IS_LSI(un)) {
						try_resetting_target = B_FALSE;
					}
					break;
				default:
					break;
				}
			}

			if (try_resetting_target == B_TRUE) {
				int reset_retval = 0;
				/* Prefer a LUN reset when it is enabled. */
				if (un->un_f_lun_reset_enabled == TRUE) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_LUN\n");
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				/*
				 * Escalate to a target reset if the LUN reset
				 * was skipped or did not succeed (a zero
				 * return from scsi_reset()).
				 */
				if (reset_retval == 0) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_TARGET\n");
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}
18528 18526
18529 18527
18530 18528
18531 18529 /*
18532 18530 * Function: sd_sense_key_illegal_request
18533 18531 *
18534 18532 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
18535 18533 *
18536 18534 * Context: May be called from interrupt context
18537 18535 */
18538 18536
18539 18537 static void
18540 18538 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
18541 18539 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18542 18540 {
18543 18541 struct sd_sense_info si;
18544 18542
18545 18543 ASSERT(un != NULL);
18546 18544 ASSERT(mutex_owned(SD_MUTEX(un)));
18547 18545 ASSERT(bp != NULL);
18548 18546 ASSERT(xp != NULL);
18549 18547 ASSERT(pktp != NULL);
18550 18548
18551 18549 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);
18552 18550
18553 18551 si.ssi_severity = SCSI_ERR_INFO;
18554 18552 si.ssi_pfa_flag = FALSE;
18555 18553
18556 18554 /* Pointless to retry if the target thinks it's an illegal request */
18557 18555 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18558 18556 sd_return_failed_command(un, bp, EIO);
18559 18557 }
18560 18558
18561 18559
18562 18560
18563 18561
18564 18562 /*
18565 18563 * Function: sd_sense_key_unit_attention
18566 18564 *
18567 18565 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
18568 18566 *
18569 18567 * Context: May be called from interrupt context
18570 18568 */
18571 18569
static void
sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	/*
	 * For UNIT ATTENTION we allow retries for one minute. Devices
	 * like Sonoma can return UNIT ATTENTION close to a minute
	 * under certain conditions.
	 */
	int	retry_check_flag = SD_RETRIES_UA;
	boolean_t	kstat_updated = B_FALSE;
	struct	sd_sense_info		si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t	ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;


	switch (asc) {
	case 0x5D:	/* FAILURE PREDICTION THRESHOLD EXCEEDED */
		if (sd_report_pfa != 0) {
			SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
			si.ssi_pfa_flag = TRUE;
			retry_check_flag = SD_RETRIES_STANDARD;
			goto do_retry;
		}

		break;

	case 0x29:	/* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
		/*
		 * A reset may have invalidated our reservation; flag it
		 * as lost/wanted so the reservation machinery can attempt
		 * to reclaim it.
		 */
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
		}
#ifdef _LP64
		/*
		 * For LUNs beyond the group-1 (10-byte CDB) address limit,
		 * re-enable descriptor sense via a taskq, since we cannot
		 * issue the command from interrupt context here.
		 */
		if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
			if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
			    un, KM_NOSLEEP) == 0) {
				/*
				 * If we can't dispatch the task we'll just
				 * live without descriptor sense.  We can
				 * try again on the next "unit attention"
				 */
				SD_ERROR(SD_LOG_ERROR, un,
				    "sd_sense_key_unit_attention: "
				    "Could not dispatch "
				    "sd_reenable_dsense_task\n");
			}
		}
#endif /* _LP64 */
		/* FALLTHRU */

	case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
		if (!un->un_f_has_removable_media) {
			break;
		}

		/*
		 * When we get a unit attention from a removable-media device,
		 * it may be in a state that will take a long time to recover
		 * (e.g., from a reset). Since we are executing in interrupt
		 * context here, we cannot wait around for the device to come
		 * back. So hand this command off to sd_media_change_task()
		 * for deferred processing under taskq thread context. (Note
		 * that the command still may be failed if a problem is
		 * encountered at a later time.)
		 */
		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
		    KM_NOSLEEP) == 0) {
			/*
			 * Cannot dispatch the request so fail the command.
			 */
			SD_UPDATE_ERRSTATS(un, sd_harderrs);
			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
			si.ssi_severity = SCSI_ERR_FATAL;
			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
			sd_return_failed_command(un, bp, EIO);
		}

		/*
		 * If failed to dispatch sd_media_change_task(), we already
		 * updated kstat. If succeed to dispatch sd_media_change_task(),
		 * we should update kstat later if it encounters an error. So,
		 * we update kstat_updated flag here.
		 */
		kstat_updated = B_TRUE;

		/*
		 * Either the command has been successfully dispatched to a
		 * task Q for retrying, or the dispatch failed. In either case
		 * do NOT retry again by calling sd_retry_command. This sets up
		 * two retries of the same command and when one completes and
		 * frees the resources the other will access freed memory,
		 * a bad thing.
		 */
		return;

	default:
		break;
	}

	/*
	 * ASC  ASCQ
	 *  2A   09	Capacity data has changed
	 *  2A   01	Mode parameters changed
	 *  3F   0E	Reported luns data has changed
	 * Arrays that support logical unit expansion should report
	 * capacity changes(2Ah/09). Mode parameters changed and
	 * reported luns data has changed are the approximation.
	 */
	if (((asc == 0x2a) && (ascq == 0x09)) ||
	    ((asc == 0x2a) && (ascq == 0x01)) ||
	    ((asc == 0x3f) && (ascq == 0x0e))) {
		if (taskq_dispatch(sd_tq, sd_target_change_task, un,
		    KM_NOSLEEP) == 0) {
			SD_ERROR(SD_LOG_ERROR, un,
			    "sd_sense_key_unit_attention: "
			    "Could not dispatch sd_target_change_task\n");
		}
	}

	/*
	 * Update kstat if we haven't done that.
	 */
	if (!kstat_updated) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
	}

do_retry:
	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
	    EIO, SD_UA_RETRY_DELAY, NULL);
}
18713 18711
18714 18712
18715 18713
18716 18714 /*
18717 18715 * Function: sd_sense_key_fail_command
18718 18716 *
18719 18717 * Description: Use to fail a command when we don't like the sense key that
18720 18718 * was returned.
18721 18719 *
18722 18720 * Context: May be called from interrupt context
18723 18721 */
18724 18722
18725 18723 static void
18726 18724 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
18727 18725 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18728 18726 {
18729 18727 struct sd_sense_info si;
18730 18728
18731 18729 ASSERT(un != NULL);
18732 18730 ASSERT(mutex_owned(SD_MUTEX(un)));
18733 18731 ASSERT(bp != NULL);
18734 18732 ASSERT(xp != NULL);
18735 18733 ASSERT(pktp != NULL);
18736 18734
18737 18735 si.ssi_severity = SCSI_ERR_FATAL;
18738 18736 si.ssi_pfa_flag = FALSE;
18739 18737
18740 18738 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18741 18739 sd_return_failed_command(un, bp, EIO);
18742 18740 }
18743 18741
18744 18742
18745 18743
18746 18744 /*
18747 18745 * Function: sd_sense_key_blank_check
18748 18746 *
18749 18747 * Description: Recovery actions for a SCSI "Blank Check" sense key.
18750 18748 * Has no monetary connotation.
18751 18749 *
18752 18750 * Context: May be called from interrupt context
18753 18751 */
18754 18752
18755 18753 static void
18756 18754 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
18757 18755 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18758 18756 {
18759 18757 struct sd_sense_info si;
18760 18758
18761 18759 ASSERT(un != NULL);
18762 18760 ASSERT(mutex_owned(SD_MUTEX(un)));
18763 18761 ASSERT(bp != NULL);
18764 18762 ASSERT(xp != NULL);
18765 18763 ASSERT(pktp != NULL);
18766 18764
18767 18765 /*
18768 18766 * Blank check is not fatal for removable devices, therefore
18769 18767 * it does not require a console message.
18770 18768 */
18771 18769 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
18772 18770 SCSI_ERR_FATAL;
18773 18771 si.ssi_pfa_flag = FALSE;
18774 18772
18775 18773 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18776 18774 sd_return_failed_command(un, bp, EIO);
18777 18775 }
18778 18776
18779 18777
18780 18778
18781 18779
18782 18780 /*
18783 18781 * Function: sd_sense_key_aborted_command
18784 18782 *
18785 18783 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
18786 18784 *
18787 18785 * Context: May be called from interrupt context
18788 18786 */
18789 18787
18790 18788 static void
18791 18789 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
18792 18790 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18793 18791 {
18794 18792 struct sd_sense_info si;
18795 18793
18796 18794 ASSERT(un != NULL);
18797 18795 ASSERT(mutex_owned(SD_MUTEX(un)));
18798 18796 ASSERT(bp != NULL);
18799 18797 ASSERT(xp != NULL);
18800 18798 ASSERT(pktp != NULL);
18801 18799
18802 18800 si.ssi_severity = SCSI_ERR_FATAL;
18803 18801 si.ssi_pfa_flag = FALSE;
18804 18802
18805 18803 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18806 18804
18807 18805 /*
18808 18806 * This really ought to be a fatal error, but we will retry anyway
18809 18807 * as some drives report this as a spurious error.
18810 18808 */
18811 18809 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18812 18810 &si, EIO, drv_usectohz(100000), NULL);
18813 18811 }
18814 18812
18815 18813
18816 18814
18817 18815 /*
18818 18816 * Function: sd_sense_key_default
18819 18817 *
18820 18818 * Description: Default recovery action for several SCSI sense keys (basically
18821 18819 * attempts a retry).
18822 18820 *
18823 18821 * Context: May be called from interrupt context
18824 18822 */
18825 18823
18826 18824 static void
18827 18825 sd_sense_key_default(struct sd_lun *un,
18828 18826 uint8_t *sense_datap,
18829 18827 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18830 18828 {
18831 18829 struct sd_sense_info si;
18832 18830 uint8_t sense_key = scsi_sense_key(sense_datap);
18833 18831
18834 18832 ASSERT(un != NULL);
18835 18833 ASSERT(mutex_owned(SD_MUTEX(un)));
18836 18834 ASSERT(bp != NULL);
18837 18835 ASSERT(xp != NULL);
18838 18836 ASSERT(pktp != NULL);
18839 18837
18840 18838 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18841 18839
18842 18840 /*
18843 18841 * Undecoded sense key. Attempt retries and hope that will fix
18844 18842 * the problem. Otherwise, we're dead.
18845 18843 */
18846 18844 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
18847 18845 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18848 18846 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
18849 18847 }
18850 18848
18851 18849 si.ssi_severity = SCSI_ERR_FATAL;
18852 18850 si.ssi_pfa_flag = FALSE;
18853 18851
18854 18852 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18855 18853 &si, EIO, (clock_t)0, NULL);
18856 18854 }
18857 18855
18858 18856
18859 18857
18860 18858 /*
18861 18859 * Function: sd_print_retry_msg
18862 18860 *
18863 18861 * Description: Print a message indicating the retry action being taken.
18864 18862 *
18865 18863 * Arguments: un - ptr to associated softstate
18866 18864 * bp - ptr to buf(9S) for the command
18867 18865 * arg - not used.
18868 18866 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18869 18867 * or SD_NO_RETRY_ISSUED
18870 18868 *
18871 18869 * Context: May be called from interrupt context
18872 18870 */
/* ARGSUSED */
static void
sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	char *reasonp;
	char *msgp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * No message while suspended, in low power, or for a command
	 * marked FLAG_SILENT; still record the pkt_reason below.
	 */
	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if ((un->un_state == SD_STATE_SUSPENDED) ||
	    (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
	    (pktp->pkt_flags & FLAG_SILENT)) {
		mutex_exit(&un->un_pm_mutex);
		goto update_pkt_reason;
	}
	mutex_exit(&un->un_pm_mutex);

	/*
	 * Suppress messages if they are all the same pkt_reason; with
	 * TQ, many (up to 256) are returned with the same pkt_reason.
	 * If we are in panic, then suppress the retry messages.
	 */
	switch (flag) {
	case SD_NO_RETRY_ISSUED:
		msgp = "giving up";
		break;
	case SD_IMMEDIATE_RETRY_ISSUED:
	case SD_DELAYED_RETRY_ISSUED:
		if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
		    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
		    (sd_error_level != SCSI_ERR_ALL))) {
			return;
		}
		msgp = "retrying command";
		break;
	default:
		goto update_pkt_reason;
	}

	/* A recorded parity error takes precedence over the pkt_reason. */
	reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
	    scsi_rname(pktp->pkt_reason));

	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
	}

update_pkt_reason:
	/*
	 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
	 * This is to prevent multiple console messages for the same failure
	 * condition. Note that un->un_last_pkt_reason is NOT restored if &
	 * when the command is retried successfully because there still may be
	 * more commands coming back with the same value of pktp->pkt_reason.
	 */
	if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
		un->un_last_pkt_reason = pktp->pkt_reason;
	}
}
18942 18940
18943 18941
18944 18942 /*
18945 18943 * Function: sd_print_cmd_incomplete_msg
18946 18944 *
18947 18945 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
18948 18946 *
18949 18947 * Arguments: un - ptr to associated softstate
18950 18948 * bp - ptr to buf(9S) for the command
18951 18949 * arg - passed to sd_print_retry_msg()
18952 18950 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18953 18951 * or SD_NO_RETRY_ISSUED
18954 18952 *
18955 18953 * Context: May be called from interrupt context
18956 18954 */
18957 18955
static void
sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
	int code)
{
	dev_info_t	*dip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	switch (code) {
	case SD_NO_RETRY_ISSUED:
		/* Command was failed. Someone turned off this target? */
		if (un->un_state != SD_STATE_OFFLINE) {
			/*
			 * Suppress message if we are detaching and
			 * device has been disconnected
			 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation
			 * private interface and not part of the DDI
			 */
			dip = un->un_sd->sd_dev;
			if (!(DEVI_IS_DETACHING(dip) &&
			    DEVI_IS_DEVICE_REMOVED(dip))) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "disk not responding to selection\n");
			}
			/* No retry was issued: take the target offline. */
			New_state(un, SD_STATE_OFFLINE);
		}
		break;

	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
	default:
		/* Command was successfully queued for retry */
		sd_print_retry_msg(un, bp, arg, code);
		break;
	}
}
18996 18994
18997 18995
18998 18996 /*
18999 18997 * Function: sd_pkt_reason_cmd_incomplete
19000 18998 *
19001 18999 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
19002 19000 *
19003 19001 * Context: May be called from interrupt context
19004 19002 */
19005 19003
19006 19004 static void
19007 19005 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
19008 19006 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19009 19007 {
19010 19008 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;
19011 19009
19012 19010 ASSERT(un != NULL);
19013 19011 ASSERT(mutex_owned(SD_MUTEX(un)));
19014 19012 ASSERT(bp != NULL);
19015 19013 ASSERT(xp != NULL);
19016 19014 ASSERT(pktp != NULL);
19017 19015
19018 19016 /* Do not do a reset if selection did not complete */
19019 19017 /* Note: Should this not just check the bit? */
19020 19018 if (pktp->pkt_state != STATE_GOT_BUS) {
19021 19019 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19022 19020 sd_reset_target(un, pktp);
19023 19021 }
19024 19022
19025 19023 /*
19026 19024 * If the target was not successfully selected, then set
19027 19025 * SD_RETRIES_FAILFAST to indicate that we lost communication
19028 19026 * with the target, and further retries and/or commands are
19029 19027 * likely to take a long time.
19030 19028 */
19031 19029 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
19032 19030 flag |= SD_RETRIES_FAILFAST;
19033 19031 }
19034 19032
19035 19033 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19036 19034
19037 19035 sd_retry_command(un, bp, flag,
19038 19036 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19039 19037 }
19040 19038
19041 19039
19042 19040
19043 19041 /*
19044 19042 * Function: sd_pkt_reason_cmd_tran_err
19045 19043 *
19046 19044 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
19047 19045 *
19048 19046 * Context: May be called from interrupt context
19049 19047 */
19050 19048
19051 19049 static void
19052 19050 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
19053 19051 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19054 19052 {
19055 19053 ASSERT(un != NULL);
19056 19054 ASSERT(mutex_owned(SD_MUTEX(un)));
19057 19055 ASSERT(bp != NULL);
19058 19056 ASSERT(xp != NULL);
19059 19057 ASSERT(pktp != NULL);
19060 19058
19061 19059 /*
19062 19060 * Do not reset if we got a parity error, or if
19063 19061 * selection did not complete.
19064 19062 */
19065 19063 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19066 19064 /* Note: Should this not just check the bit for pkt_state? */
19067 19065 if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
19068 19066 (pktp->pkt_state != STATE_GOT_BUS)) {
19069 19067 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19070 19068 sd_reset_target(un, pktp);
19071 19069 }
19072 19070
19073 19071 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19074 19072
19075 19073 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19076 19074 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19077 19075 }
19078 19076
19079 19077
19080 19078
19081 19079 /*
19082 19080 * Function: sd_pkt_reason_cmd_reset
19083 19081 *
19084 19082 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
19085 19083 *
19086 19084 * Context: May be called from interrupt context
19087 19085 */
19088 19086
19089 19087 static void
19090 19088 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
19091 19089 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19092 19090 {
19093 19091 ASSERT(un != NULL);
19094 19092 ASSERT(mutex_owned(SD_MUTEX(un)));
19095 19093 ASSERT(bp != NULL);
19096 19094 ASSERT(xp != NULL);
19097 19095 ASSERT(pktp != NULL);
19098 19096
19099 19097 /* The target may still be running the command, so try to reset. */
19100 19098 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19101 19099 sd_reset_target(un, pktp);
19102 19100
19103 19101 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19104 19102
19105 19103 /*
19106 19104 * If pkt_reason is CMD_RESET chances are that this pkt got
19107 19105 * reset because another target on this bus caused it. The target
19108 19106 * that caused it should get CMD_TIMEOUT with pkt_statistics
19109 19107 * of STAT_TIMEOUT/STAT_DEV_RESET.
19110 19108 */
19111 19109
19112 19110 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19113 19111 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19114 19112 }
19115 19113
19116 19114
19117 19115
19118 19116
19119 19117 /*
19120 19118 * Function: sd_pkt_reason_cmd_aborted
19121 19119 *
19122 19120 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
19123 19121 *
19124 19122 * Context: May be called from interrupt context
19125 19123 */
19126 19124
19127 19125 static void
19128 19126 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
19129 19127 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19130 19128 {
19131 19129 ASSERT(un != NULL);
19132 19130 ASSERT(mutex_owned(SD_MUTEX(un)));
19133 19131 ASSERT(bp != NULL);
19134 19132 ASSERT(xp != NULL);
19135 19133 ASSERT(pktp != NULL);
19136 19134
19137 19135 /* The target may still be running the command, so try to reset. */
19138 19136 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19139 19137 sd_reset_target(un, pktp);
19140 19138
19141 19139 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19142 19140
19143 19141 /*
19144 19142 * If pkt_reason is CMD_ABORTED chances are that this pkt got
19145 19143 * aborted because another target on this bus caused it. The target
19146 19144 * that caused it should get CMD_TIMEOUT with pkt_statistics
19147 19145 * of STAT_TIMEOUT/STAT_DEV_RESET.
19148 19146 */
19149 19147
19150 19148 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19151 19149 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19152 19150 }
19153 19151
19154 19152
19155 19153
19156 19154 /*
19157 19155 * Function: sd_pkt_reason_cmd_timeout
19158 19156 *
19159 19157 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
19160 19158 *
19161 19159 * Context: May be called from interrupt context
19162 19160 */
19163 19161
19164 19162 static void
19165 19163 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
19166 19164 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19167 19165 {
19168 19166 ASSERT(un != NULL);
19169 19167 ASSERT(mutex_owned(SD_MUTEX(un)));
19170 19168 ASSERT(bp != NULL);
19171 19169 ASSERT(xp != NULL);
19172 19170 ASSERT(pktp != NULL);
19173 19171
19174 19172
19175 19173 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19176 19174 sd_reset_target(un, pktp);
19177 19175
19178 19176 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19179 19177
19180 19178 /*
19181 19179 * A command timeout indicates that we could not establish
19182 19180 * communication with the target, so set SD_RETRIES_FAILFAST
19183 19181 * as further retries/commands are likely to take a long time.
19184 19182 */
19185 19183 sd_retry_command(un, bp,
19186 19184 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
19187 19185 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19188 19186 }
19189 19187
19190 19188
19191 19189
19192 19190 /*
19193 19191 * Function: sd_pkt_reason_cmd_unx_bus_free
19194 19192 *
19195 19193 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
19196 19194 *
19197 19195 * Context: May be called from interrupt context
19198 19196 */
19199 19197
19200 19198 static void
19201 19199 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
19202 19200 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19203 19201 {
19204 19202 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);
19205 19203
19206 19204 ASSERT(un != NULL);
19207 19205 ASSERT(mutex_owned(SD_MUTEX(un)));
19208 19206 ASSERT(bp != NULL);
19209 19207 ASSERT(xp != NULL);
19210 19208 ASSERT(pktp != NULL);
19211 19209
19212 19210 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19213 19211 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19214 19212
19215 19213 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
19216 19214 sd_print_retry_msg : NULL;
19217 19215
19218 19216 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19219 19217 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19220 19218 }
19221 19219
19222 19220
19223 19221 /*
19224 19222 * Function: sd_pkt_reason_cmd_tag_reject
19225 19223 *
19226 19224 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
19227 19225 *
19228 19226 * Context: May be called from interrupt context
19229 19227 */
19230 19228
static void
sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	/*
	 * The target rejected the tagged command: clear tag flags and
	 * back off the throttle before disabling tagged queueing.
	 */
	pktp->pkt_flags = 0;
	un->un_tagflags = 0;
	if (un->un_f_opt_queueing == TRUE) {
		un->un_throttle = min(un->un_throttle, 3);
	} else {
		un->un_throttle = 1;
	}
	/*
	 * NOTE(review): SD_MUTEX is dropped around scsi_ifsetcap();
	 * presumably the capability call may block or re-enter — confirm.
	 */
	mutex_exit(SD_MUTEX(un));
	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	mutex_enter(SD_MUTEX(un));

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Legacy behavior not to check retry counts here. */
	sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
19259 19257
19260 19258
19261 19259 /*
19262 19260 * Function: sd_pkt_reason_default
19263 19261 *
19264 19262 * Description: Default recovery actions for SCSA pkt_reason values that
19265 19263 * do not have more explicit recovery actions.
19266 19264 *
19267 19265 * Context: May be called from interrupt context
19268 19266 */
19269 19267
19270 19268 static void
19271 19269 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
19272 19270 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19273 19271 {
19274 19272 ASSERT(un != NULL);
19275 19273 ASSERT(mutex_owned(SD_MUTEX(un)));
19276 19274 ASSERT(bp != NULL);
19277 19275 ASSERT(xp != NULL);
19278 19276 ASSERT(pktp != NULL);
19279 19277
19280 19278 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19281 19279 sd_reset_target(un, pktp);
19282 19280
19283 19281 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19284 19282
19285 19283 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19286 19284 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19287 19285 }
19288 19286
19289 19287
19290 19288
19291 19289 /*
19292 19290 * Function: sd_pkt_status_check_condition
19293 19291 *
19294 19292 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
19295 19293 *
19296 19294 * Context: May be called from interrupt context
19297 19295 */
19298 19296
static void
sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
	    "entry: buf:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
	 * command will be retried after the request sense). Otherwise, retry
	 * the command. Note: we are issuing the request sense even though the
	 * retry limit may have been reached for the failed command.
	 */
	if (un->un_f_arq_enabled == FALSE) {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "no ARQ, sending request sense command\n");
		sd_send_request_sense_command(un, bp, pktp);
	} else {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "ARQ,retrying request sense command\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * The SD_RETRY_DELAY value need to be adjusted here
		 * when SD_RETRY_DELAY change in sddef.h
		 */
		/* x86: fibre targets get a 100ms delay, others retry now. */
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
		    un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0,
		    NULL);
#else
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
		    EIO, SD_RETRY_DELAY, NULL);
#endif
	}

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
}
19341 19339
19342 19340
19343 19341 /*
19344 19342 * Function: sd_pkt_status_busy
19345 19343 *
19346 19344 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
19347 19345 *
19348 19346 * Context: May be called from interrupt context
19349 19347 */
19350 19348
static void
sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: entry\n");

	/* If retries are exhausted, just fail the command. */
	if (xp->xb_retry_count >= un->un_busy_retry_count) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "device busy too long\n");
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_pkt_status_busy: exit\n");
		return;
	}
	xp->xb_retry_count++;

	/*
	 * Try to reset the target. However, we do not want to perform
	 * more than one reset if the device continues to fail. The reset
	 * will be performed when the retry count reaches the reset
	 * threshold. This threshold should be set such that at least
	 * one retry is issued before the reset is performed.
	 */
	if (xp->xb_retry_count ==
	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
		int rval = 0;
		/* Drop SD_MUTEX for the scsi_reset() calls below. */
		mutex_exit(SD_MUTEX(un));
		if (un->un_f_allow_bus_device_reset == TRUE) {
			/*
			 * First try to reset the LUN; if we cannot then
			 * try to reset the target.
			 */
			if (un->un_f_lun_reset_enabled == TRUE) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_LUN\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
			}
			if (rval == 0) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_TARGET\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
		}
		if (rval == 0) {
			/*
			 * If the RESET_LUN and/or RESET_TARGET failed,
			 * try RESET_ALL
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: RESET_ALL\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
		}
		mutex_enter(SD_MUTEX(un));
		if (rval == 0) {
			/*
			 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
			 * At this point we give up & fail the command.
			 */
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: exit (failed cmd)\n");
			return;
		}
	}

	/*
	 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
	 * we have already checked the retry counts above.
	 */
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
	    EIO, un->un_busy_timeout, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: exit\n");
}
19434 19432
19435 19433
19436 19434 /*
19437 19435 * Function: sd_pkt_status_reservation_conflict
19438 19436 *
19439 19437 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
19440 19438 * command status.
19441 19439 *
19442 19440 * Context: May be called from interrupt context
19443 19441 */
19444 19442
static void
sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation
	 * conflict could be due to various reasons like incorrect keys, not
	 * registered or not reserved etc. So, we return EACCES to the caller.
	 */
	if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
		int cmd = SD_GET_PKT_OPCODE(pktp);
		if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
		    (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
			sd_return_failed_command(un, bp, EACCES);
			return;
		}
	}

	un->un_resvd_status |= SD_RESERVATION_CONFLICT;

	/*
	 * Failfast reservation handling: with sd_failfast_enable set we
	 * panic; otherwise fail the command with EACCES.
	 */
	if ((un->un_resvd_status & SD_FAILFAST) != 0) {
		if (sd_failfast_enable != 0) {
			/* By definition, we must panic here.... */
			sd_panic_for_res_conflict(un);
			/*NOTREACHED*/
		}
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Disk Reserved\n");
		sd_return_failed_command(un, bp, EACCES);
		return;
	}

	/*
	 * 1147670: retry only if sd_retry_on_reservation_conflict
	 * property is set (default is 1). Retries will not succeed
	 * on a disk reserved by another initiator. HA systems
	 * may reset this via sd.conf to avoid these retries.
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
	 */
	if (sd_retry_on_reservation_conflict == 0) {
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Device Reserved\n");
		sd_return_failed_command(un, bp, EIO);
		return;
	}

	/*
	 * Retry the command if we can.
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
	    (clock_t)2, NULL);
}
19508 19506
19509 19507
19510 19508
/*
 * Function: sd_pkt_status_qfull
 *
 * Description: Handle a QUEUE FULL condition from the target. This can
 *		occur if the HBA does not handle the queue full condition.
 *		(Basically this means third-party HBAs as Sun HBAs will
 *		handle the queue full condition.) Note that if there are
 *		some commands already in the transport, then the queue full
 *		has occurred because the queue for this nexus is actually
 *		full. If there are no commands in the transport, then the
 *		queue full is resulting from some other initiator or lun
 *		consuming all the resources at the target.
 *
 * Arguments: un   - ptr to the driver soft state (unit) struct for the target
 *		bp   - ptr to the buf for the command that saw QUEUE FULL
 *		xp   - ptr to the sd_xbuf associated with bp
 *		pktp - ptr to the completed scsi_pkt
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: entry\n");

	/*
	 * Just lower the QFULL throttle and retry the command. Note that
	 * we do not limit the number of retries here.
	 * The retry delay SD_RESTART_TIMEOUT lets the target drain its
	 * queue before the command is re-issued.
	 */
	sd_reduce_throttle(un, SD_THROTTLE_QFULL);
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
	    SD_RESTART_TIMEOUT, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: exit\n");
}
19551 19549
19552 19550
19553 19551 /*
19554 19552 * Function: sd_reset_target
19555 19553 *
19556 19554 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
19557 19555 * RESET_TARGET, or RESET_ALL.
19558 19556 *
19559 19557 * Context: May be called under interrupt context.
19560 19558 */
19561 19559
19562 19560 static void
19563 19561 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
19564 19562 {
19565 19563 int rval = 0;
19566 19564
19567 19565 ASSERT(un != NULL);
19568 19566 ASSERT(mutex_owned(SD_MUTEX(un)));
19569 19567 ASSERT(pktp != NULL);
19570 19568
19571 19569 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");
19572 19570
19573 19571 /*
19574 19572 * No need to reset if the transport layer has already done so.
19575 19573 */
19576 19574 if ((pktp->pkt_statistics &
19577 19575 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
19578 19576 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19579 19577 "sd_reset_target: no reset\n");
19580 19578 return;
19581 19579 }
19582 19580
19583 19581 mutex_exit(SD_MUTEX(un));
19584 19582
19585 19583 if (un->un_f_allow_bus_device_reset == TRUE) {
19586 19584 if (un->un_f_lun_reset_enabled == TRUE) {
19587 19585 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19588 19586 "sd_reset_target: RESET_LUN\n");
19589 19587 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
19590 19588 }
19591 19589 if (rval == 0) {
19592 19590 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19593 19591 "sd_reset_target: RESET_TARGET\n");
19594 19592 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
19595 19593 }
19596 19594 }
19597 19595
19598 19596 if (rval == 0) {
19599 19597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19600 19598 "sd_reset_target: RESET_ALL\n");
19601 19599 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
19602 19600 }
19603 19601
19604 19602 mutex_enter(SD_MUTEX(un));
19605 19603
19606 19604 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
19607 19605 }
19608 19606
19609 19607 /*
19610 19608 * Function: sd_target_change_task
19611 19609 *
19612 19610 * Description: Handle dynamic target change
19613 19611 *
19614 19612 * Context: Executes in a taskq() thread context
19615 19613 */
19616 19614 static void
19617 19615 sd_target_change_task(void *arg)
19618 19616 {
19619 19617 struct sd_lun *un = arg;
19620 19618 uint64_t capacity;
19621 19619 diskaddr_t label_cap;
19622 19620 uint_t lbasize;
19623 19621 sd_ssc_t *ssc;
19624 19622
19625 19623 ASSERT(un != NULL);
19626 19624 ASSERT(!mutex_owned(SD_MUTEX(un)));
19627 19625
19628 19626 if ((un->un_f_blockcount_is_valid == FALSE) ||
19629 19627 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
19630 19628 return;
19631 19629 }
19632 19630
19633 19631 ssc = sd_ssc_init(un);
19634 19632
19635 19633 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
19636 19634 &lbasize, SD_PATH_DIRECT) != 0) {
19637 19635 SD_ERROR(SD_LOG_ERROR, un,
19638 19636 "sd_target_change_task: fail to read capacity\n");
19639 19637 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19640 19638 goto task_exit;
19641 19639 }
19642 19640
19643 19641 mutex_enter(SD_MUTEX(un));
19644 19642 if (capacity <= un->un_blockcount) {
19645 19643 mutex_exit(SD_MUTEX(un));
19646 19644 goto task_exit;
19647 19645 }
19648 19646
19649 19647 sd_update_block_info(un, lbasize, capacity);
19650 19648 mutex_exit(SD_MUTEX(un));
19651 19649
19652 19650 /*
19653 19651 * If lun is EFI labeled and lun capacity is greater than the
19654 19652 * capacity contained in the label, log a sys event.
19655 19653 */
19656 19654 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
19657 19655 (void*)SD_PATH_DIRECT) == 0) {
19658 19656 mutex_enter(SD_MUTEX(un));
19659 19657 if (un->un_f_blockcount_is_valid &&
19660 19658 un->un_blockcount > label_cap) {
19661 19659 mutex_exit(SD_MUTEX(un));
19662 19660 sd_log_lun_expansion_event(un, KM_SLEEP);
19663 19661 } else {
19664 19662 mutex_exit(SD_MUTEX(un));
19665 19663 }
19666 19664 }
19667 19665
19668 19666 task_exit:
19669 19667 sd_ssc_fini(ssc);
19670 19668 }
19671 19669
19672 19670
/*
 * Function: sd_log_dev_status_event
 *
 * Description: Log an EC_dev_status sysevent with the given subclass (esc),
 *		identifying the LUN by the /devices path of its 'a' minor node.
 *
 * Arguments: un      - ptr to the driver soft state (unit) struct
 *		esc     - sysevent subclass (e.g. ESC_DEV_DLE)
 *		km_flag - KM_SLEEP or KM_NOSLEEP, passed through to the
 *		          allocation and sysevent routines
 *
 * Context: Never called from interrupt context
 */
static void
sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag)
{
	int err;
	char			*path;
	nvlist_t		*attr_list;

	/* Allocate and build sysevent attribute list */
	err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
	if (err != 0) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to allocate space\n");
		return;
	}

	/* NULL check is meaningful only for KM_NOSLEEP callers. */
	path = kmem_alloc(MAXPATHLEN, km_flag);
	if (path == NULL) {
		nvlist_free(attr_list);
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to allocate space\n");
		return;
	}
	/*
	 * Add path attribute to identify the lun.
	 * We are using minor node 'a' as the sysevent attribute.
	 *
	 * NOTE(review): ddi_pathname() takes no buffer size; this relies on
	 * "/devices" + device path + ":a" fitting in MAXPATHLEN — TODO confirm.
	 */
	(void) snprintf(path, MAXPATHLEN, "/devices");
	(void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
	(void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
	    ":a");

	err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path);
	if (err != 0) {
		nvlist_free(attr_list);
		kmem_free(path, MAXPATHLEN);
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to add attribute\n");
		return;
	}

	/* Log dynamic lun expansion sysevent */
	err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
	    esc, attr_list, NULL, km_flag);
	if (err != DDI_SUCCESS) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to log sysevent\n");
	}

	nvlist_free(attr_list);
	kmem_free(path, MAXPATHLEN);
}
19731 19729
19732 19730
/*
 * Function: sd_log_lun_expansion_event
 *
 * Description: Log a lun expansion sysevent (EC_dev_status / ESC_DEV_DLE)
 *		for this LUN.  Thin wrapper around sd_log_dev_status_event().
 *
 * Arguments: un      - ptr to the driver soft state (unit) struct
 *		km_flag - KM_SLEEP or KM_NOSLEEP for the sysevent allocations
 *
 * Context: Never called from interrupt context
 */
static void
sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
{
	sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag);
}
19745 19743
19746 19744
/*
 * Function: sd_log_eject_request_event
 *
 * Description: Log an eject request sysevent (EC_dev_status /
 *		ESC_DEV_EJECT_REQUEST) for this LUN.  Thin wrapper around
 *		sd_log_dev_status_event().
 *
 * Arguments: un      - ptr to the driver soft state (unit) struct
 *		km_flag - KM_SLEEP or KM_NOSLEEP for the sysevent allocations
 *
 * Context: Never called from interrupt context
 */
static void
sd_log_eject_request_event(struct sd_lun *un, int km_flag)
{
	sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag);
}
19759 19757
19760 19758
/*
 * Function: sd_media_change_task
 *
 * Description: Recovery action for CDROM to become available.  Repeatedly
 *		calls sd_handle_mchange() until it succeeds or the retry
 *		budget is exhausted, then either retries or fails the
 *		original command.
 *
 * Arguments: arg - the scsi_pkt of the command that reported the media
 *		     change; its pkt_private points at the original buf
 *
 * Context: Executes in a taskq() thread context
 */

static void
sd_media_change_task(void *arg)
{
	struct	scsi_pkt	*pktp = arg;
	struct	sd_lun		*un;
	struct	buf		*bp;
	struct	sd_xbuf		*xp;
	int	err		= 0;
	int	retry_count	= 0;
	int	retry_limit	= SD_UNIT_ATTENTION_RETRY/10;
	struct	sd_sense_info	si;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_f_monitor_media_state);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/*
	 * When a reset is issued on a CDROM, it takes a long time to
	 * recover. First few attempts to read capacity and other things
	 * related to handling unit attention fail (with a ASC 0x4 and
	 * ASCQ 0x1). In that case we want to do enough retries and we want
	 * to limit the retries in other cases of genuine failures like
	 * no media in drive.
	 *
	 * The retry budget starts small and is widened to the full
	 * SD_UNIT_ATTENTION_RETRY only once EAGAIN ("becoming ready")
	 * is observed.
	 */
	while (retry_count++ < retry_limit) {
		if ((err = sd_handle_mchange(un)) == 0) {
			break;
		}
		if (err == EAGAIN) {
			retry_limit = SD_UNIT_ATTENTION_RETRY;
		}
		/* Sleep for 0.5 sec. & try again */
		delay(drv_usectohz(500000));
	}

	/*
	 * Dispatch (retry or fail) the original command here,
	 * along with appropriate console messages....
	 *
	 * Must grab the mutex before calling sd_retry_command,
	 * sd_print_sense_msg and sd_return_failed_command.
	 */
	mutex_enter(SD_MUTEX(un));
	if (err != SD_CMD_SUCCESS) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
		si.ssi_severity = SCSI_ERR_FATAL;
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_failed_command(un, bp, EIO);
	} else {
		sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg,
		    &si, EIO, (clock_t)0, NULL);
	}
	mutex_exit(SD_MUTEX(un));
}
19833 19831
19834 19832
19835 19833
/*
 * Function: sd_handle_mchange
 *
 * Description: Perform geometry validation & other recovery when CDROM
 *		has been removed from drive.  Re-reads capacity, refreshes
 *		cached block info and error-stat capacity, revalidates the
 *		label via cmlb, and attempts to re-lock the door.
 *
 * Arguments: un - ptr to the driver soft state (unit) struct for the target
 *
 * Return Code: 0 for success
 *		errno-type return code of either sd_send_scsi_DOORLOCK() or
 *		sd_send_scsi_READ_CAPACITY()
 *
 * Context: Executes in a taskq() thread context
 */

static int
sd_handle_mchange(struct sd_lun *un)
{
	uint64_t	capacity;
	uint32_t	lbasize;
	int		rval;
	sd_ssc_t	*ssc;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_f_monitor_media_state);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
	    SD_PATH_DIRECT_PRIORITY);

	if (rval != 0)
		goto failed;

	mutex_enter(SD_MUTEX(un));
	sd_update_block_info(un, lbasize, capacity);

	/* Refresh the reported capacity in the error kstats, if present. */
	if (un->un_errstats != NULL) {
		struct	sd_errstats *stp =
		    (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount *
		    (uint64_t)un->un_tgt_blocksize);
	}

	/*
	 * Check if the media in the device is writable or not
	 */
	if (ISCD(un)) {
		sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY);
	}

	/*
	 * Note: Maybe let the strategy/partitioning chain worry about getting
	 * valid geometry.
	 */
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);


	if (cmlb_validate(un->un_cmlbhandle, 0,
	    (void *)SD_PATH_DIRECT_PRIORITY) != 0) {
		sd_ssc_fini(ssc);
		return (EIO);
	} else {
		if (un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_handle_mchange: un:0x%p pstats created and "
			    "set\n", un);
		}
	}

	/*
	 * Try to lock the door
	 */
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
	    SD_PATH_DIRECT_PRIORITY);
failed:
	/* On any failure, record an FMA "ignore" assessment before cleanup. */
	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);
	return (rval);
}
19917 19915
19918 19916
/*
 * Function: sd_send_scsi_DOORLOCK
 *
 * Description: Issue the scsi DOOR LOCK command
 *
 * Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *		      structure for this target.
 *		flag  - SD_REMOVAL_ALLOW
 *			SD_REMOVAL_PREVENT
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *			command is issued as part of an error recovery action.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.
 */

static int
sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);

	/* already determined doorlock is not supported, fake success */
	if (un->un_f_doorlock_supported == FALSE) {
		return (0);
	}

	/*
	 * If we are ejecting and see an SD_REMOVAL_PREVENT
	 * ignore the command so we can complete the eject
	 * operation.
	 */
	if (flag == SD_REMOVAL_PREVENT) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_f_ejecting == TRUE) {
			mutex_exit(SD_MUTEX(un));
			return (EAGAIN);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	/* CDB byte 4 carries the PREVENT/ALLOW bit per the SCSI spec. */
	cdb.scc_cmd = SCMD_DOORLOCK;
	cdb.cdb_opaque[4] = (uchar_t)flag;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 15;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	if (status == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

	/*
	 * ILLEGAL REQUEST means the device does not implement DOORLOCK;
	 * remember that and fake success now and for all future calls.
	 */
	if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
	    (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
	    (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		/* fake success and skip subsequent doorlock commands */
		un->un_f_doorlock_supported = FALSE;
		return (0);
	}

	return (status);
}
20011 20009
/*
 * Function: sd_send_scsi_READ_CAPACITY
 *
 * Description: This routine uses the scsi READ CAPACITY command to determine
 *		the device capacity in number of blocks and the device native
 *		block size. If this function returns a failure, then the
 *		values in *capp and *lbap are undefined. If the capacity
 *		returned is 0xffffffff then the lun is too large for a
 *		normal READ CAPACITY command and the results of a
 *		READ CAPACITY 16 will be used instead.
 *
 * Arguments: ssc   - ssc contains ptr to soft state struct for the target
 *		capp - ptr to unsigned 64-bit variable to receive the
 *			capacity value from the command.
 *		lbap - ptr to unsigned 32-bit varaible to receive the
 *			block size value from the command
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *			command is issued as part of an error recovery action.
 *
 * Return Code: 0 - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		EAGAIN - Device is becoming ready
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep.  Blocks until command completes.
 */

#define	SD_CAPACITY_SIZE	sizeof (struct scsi_capacity)

static int
sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
	int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	struct	uscsi_cmd	ucmd_buf;
	union	scsi_cdb	cdb;
	uint32_t		*capacity_buf;
	uint64_t		capacity;
	uint32_t		lbasize;
	uint32_t		pbsize;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(capp != NULL);
	ASSERT(lbap != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);

	/*
	 * First send a READ_CAPACITY command to the target.
	 * (This command is mandatory under SCSI-2.)
	 *
	 * Set up the CDB for the READ_CAPACITY command.  The Partial
	 * Medium Indicator bit is cleared.  The address field must be
	 * zero if the PMI bit is zero.
	 */
	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);

	cdb.scc_cmd = SCMD_READ_CAPACITY;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity_buf;
	ucmd_buf.uscsi_buflen	= SD_CAPACITY_SIZE;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/* Return failure if we did not get valid capacity data. */
		if (ucmd_buf.uscsi_resid != 0) {
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "sd_send_scsi_READ_CAPACITY received invalid "
			    "capacity data");
			kmem_free(capacity_buf, SD_CAPACITY_SIZE);
			return (EIO);
		}
		/*
		 * Read capacity and block size from the READ CAPACITY 10 data.
		 * This data may be adjusted later due to device specific
		 * issues.
		 *
		 * According to the SCSI spec, the READ CAPACITY 10
		 * command returns the following:
		 *
		 *  bytes 0-3: Maximum logical block address available.
		 *		(MSB in byte:0 & LSB in byte:3)
		 *
		 *  bytes 4-7: Block length in bytes
		 *		(MSB in byte:4 & LSB in byte:7)
		 */
		capacity = BE_32(capacity_buf[0]);
		lbasize = BE_32(capacity_buf[1]);

		/*
		 * Done with capacity_buf
		 */
		kmem_free(capacity_buf, SD_CAPACITY_SIZE);

		/*
		 * if the reported capacity is set to all 0xf's, then
		 * this disk is too large and requires SBC-2 commands.
		 * Reissue the request using READ CAPACITY 16.
		 * On RC16 success the adjustments below are skipped
		 * (jump to rc16_done).
		 */
		if (capacity == 0xffffffff) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
			    &lbasize, &pbsize, path_flag);
			if (status != 0) {
				return (status);
			} else {
				goto rc16_done;
			}
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Check condition; look for ASC/ASCQ of 0x04/0x01
			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
				kmem_free(capacity_buf, SD_CAPACITY_SIZE);
				return (EAGAIN);
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		kmem_free(capacity_buf, SD_CAPACITY_SIZE);
		return (status);
	}

	/*
	 * Some ATAPI CD-ROM drives report inaccurate LBA size values
	 * (2352 and 0 are common) so for these devices always force the value
	 * to 2048 as required by the ATAPI specs.
	 */
	if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
		lbasize = 2048;
	}

	/*
	 * Get the maximum LBA value from the READ CAPACITY data.
	 * Here we assume that the Partial Medium Indicator (PMI) bit
	 * was cleared when issuing the command. This means that the LBA
	 * returned from the device is the LBA of the last logical block
	 * on the logical unit.  The actual logical block count will be
	 * this value plus one.
	 */
	capacity += 1;

	/*
	 * Currently, for removable media, the capacity is saved in terms
	 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
	 */
	if (un->un_f_has_removable_media)
		capacity *= (lbasize / un->un_sys_blocksize);

rc16_done:

	/*
	 * Copy the values from the READ CAPACITY command into the space
	 * provided by the caller.
	 */
	*capp = capacity;
	*lbap = lbasize;

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);

	/*
	 * Both the lbasize and capacity from the device must be nonzero,
	 * otherwise we assume that the values are not valid and return
	 * failure to the caller. (4203735)
	 */
	if ((capacity == 0) || (lbasize == 0)) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
		    "sd_send_scsi_READ_CAPACITY received invalid value "
		    "capacity %llu lbasize %d", capacity, lbasize);
		return (EIO);
	}
	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	return (0);
}
20224 20222
20225 20223 /*
20226 20224 * Function: sd_send_scsi_READ_CAPACITY_16
20227 20225 *
20228 20226 * Description: This routine uses the scsi READ CAPACITY 16 command to
20229 20227 * determine the device capacity in number of blocks and the
20230 20228 * device native block size. If this function returns a failure,
20231 20229 * then the values in *capp and *lbap are undefined.
20232 20230 * This routine should be called by sd_send_scsi_READ_CAPACITY
20233 20231 * which will apply any device specific adjustments to capacity
20234 20232 * and lbasize. One exception is it is also called by
20235 20233 * sd_get_media_info_ext. In that function, there is no need to
20236 20234 * adjust the capacity and lbasize.
20237 20235 *
20238 20236 * Arguments: ssc - ssc contains ptr to soft state struct for the target
20239 20237 * capp - ptr to unsigned 64-bit variable to receive the
20240 20238 * capacity value from the command.
20241 20239 * lbap - ptr to unsigned 32-bit varaible to receive the
20242 20240 * block size value from the command
20243 20241 * psp - ptr to unsigned 32-bit variable to receive the
20244 20242 * physical block size value from the command
20245 20243 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20246 20244 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20247 20245 * to use the USCSI "direct" chain and bypass the normal
20248 20246 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
20249 20247 * this command is issued as part of an error recovery
20250 20248 * action.
20251 20249 *
20252 20250 * Return Code: 0 - Success
20253 20251 * EIO - IO error
20254 20252 * EACCES - Reservation conflict detected
20255 20253 * EAGAIN - Device is becoming ready
20256 20254 * errno return code from sd_ssc_send()
20257 20255 *
20258 20256 * Context: Can sleep. Blocks until command completes.
20259 20257 */
20260 20258
20261 20259 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
20262 20260
20263 20261 static int
20264 20262 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
20265 20263 uint32_t *lbap, uint32_t *psp, int path_flag)
20266 20264 {
20267 20265 struct scsi_extended_sense sense_buf;
20268 20266 struct uscsi_cmd ucmd_buf;
20269 20267 union scsi_cdb cdb;
20270 20268 uint64_t *capacity16_buf;
20271 20269 uint64_t capacity;
20272 20270 uint32_t lbasize;
20273 20271 uint32_t pbsize;
20274 20272 uint32_t lbpb_exp;
20275 20273 int status;
20276 20274 struct sd_lun *un;
20277 20275
20278 20276 ASSERT(ssc != NULL);
20279 20277
20280 20278 un = ssc->ssc_un;
20281 20279 ASSERT(un != NULL);
20282 20280 ASSERT(!mutex_owned(SD_MUTEX(un)));
20283 20281 ASSERT(capp != NULL);
20284 20282 ASSERT(lbap != NULL);
20285 20283
20286 20284 SD_TRACE(SD_LOG_IO, un,
20287 20285 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
20288 20286
20289 20287 /*
20290 20288 * First send a READ_CAPACITY_16 command to the target.
20291 20289 *
20292 20290 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
20293 20291 * Medium Indicator bit is cleared. The address field must be
20294 20292 * zero if the PMI bit is zero.
20295 20293 */
20296 20294 bzero(&cdb, sizeof (cdb));
20297 20295 bzero(&ucmd_buf, sizeof (ucmd_buf));
20298 20296
20299 20297 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
20300 20298
20301 20299 ucmd_buf.uscsi_cdb = (char *)&cdb;
20302 20300 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
20303 20301 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
20304 20302 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
20305 20303 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20306 20304 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20307 20305 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20308 20306 ucmd_buf.uscsi_timeout = 60;
20309 20307
20310 20308 /*
20311 20309 * Read Capacity (16) is a Service Action In command. One
20312 20310 * command byte (0x9E) is overloaded for multiple operations,
20313 20311 * with the second CDB byte specifying the desired operation
20314 20312 */
20315 20313 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
20316 20314 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
20317 20315
20318 20316 /*
20319 20317 * Fill in allocation length field
20320 20318 */
20321 20319 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
20322 20320
20323 20321 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20324 20322 UIO_SYSSPACE, path_flag);
20325 20323
20326 20324 switch (status) {
20327 20325 case 0:
20328 20326 /* Return failure if we did not get valid capacity data. */
20329 20327 if (ucmd_buf.uscsi_resid > 20) {
20330 20328 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20331 20329 "sd_send_scsi_READ_CAPACITY_16 received invalid "
20332 20330 "capacity data");
20333 20331 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20334 20332 return (EIO);
20335 20333 }
20336 20334
20337 20335 /*
20338 20336 * Read capacity and block size from the READ CAPACITY 16 data.
20339 20337 * This data may be adjusted later due to device specific
20340 20338 * issues.
20341 20339 *
20342 20340 * According to the SCSI spec, the READ CAPACITY 16
20343 20341 * command returns the following:
20344 20342 *
20345 20343 * bytes 0-7: Maximum logical block address available.
20346 20344 * (MSB in byte:0 & LSB in byte:7)
20347 20345 *
20348 20346 * bytes 8-11: Block length in bytes
20349 20347 * (MSB in byte:8 & LSB in byte:11)
20350 20348 *
20351 20349 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
20352 20350 */
20353 20351 capacity = BE_64(capacity16_buf[0]);
20354 20352 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
20355 20353 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f;
20356 20354
20357 20355 pbsize = lbasize << lbpb_exp;
20358 20356
20359 20357 /*
20360 20358 * Done with capacity16_buf
20361 20359 */
20362 20360 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20363 20361
20364 20362 /*
20365 20363 * if the reported capacity is set to all 0xf's, then
20366 20364 * this disk is too large. This could only happen with
20367 20365 * a device that supports LBAs larger than 64 bits which
20368 20366 * are not defined by any current T10 standards.
20369 20367 */
20370 20368 if (capacity == 0xffffffffffffffff) {
20371 20369 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20372 20370 "disk is too large");
20373 20371 return (EIO);
20374 20372 }
20375 20373 break; /* Success! */
20376 20374 case EIO:
20377 20375 switch (ucmd_buf.uscsi_status) {
20378 20376 case STATUS_RESERVATION_CONFLICT:
20379 20377 status = EACCES;
20380 20378 break;
20381 20379 case STATUS_CHECK:
20382 20380 /*
20383 20381 * Check condition; look for ASC/ASCQ of 0x04/0x01
20384 20382 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20385 20383 */
20386 20384 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20387 20385 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
20388 20386 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
20389 20387 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20390 20388 return (EAGAIN);
20391 20389 }
20392 20390 break;
20393 20391 default:
20394 20392 break;
20395 20393 }
20396 20394 /* FALLTHRU */
20397 20395 default:
20398 20396 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20399 20397 return (status);
20400 20398 }
20401 20399
20402 20400 /*
20403 20401 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20404 20402 * (2352 and 0 are common) so for these devices always force the value
20405 20403 * to 2048 as required by the ATAPI specs.
20406 20404 */
20407 20405 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
20408 20406 lbasize = 2048;
20409 20407 }
20410 20408
20411 20409 /*
20412 20410 * Get the maximum LBA value from the READ CAPACITY 16 data.
20413 20411 * Here we assume that the Partial Medium Indicator (PMI) bit
20414 20412 * was cleared when issuing the command. This means that the LBA
20415 20413 * returned from the device is the LBA of the last logical block
20416 20414 * on the logical unit. The actual logical block count will be
20417 20415 * this value plus one.
20418 20416 */
20419 20417 capacity += 1;
20420 20418
20421 20419 /*
20422 20420 * Currently, for removable media, the capacity is saved in terms
20423 20421 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20424 20422 */
20425 20423 if (un->un_f_has_removable_media)
20426 20424 capacity *= (lbasize / un->un_sys_blocksize);
20427 20425
20428 20426 *capp = capacity;
20429 20427 *lbap = lbasize;
20430 20428 *psp = pbsize;
20431 20429
20432 20430 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
20433 20431 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
20434 20432 capacity, lbasize, pbsize);
20435 20433
20436 20434 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) {
20437 20435 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20438 20436 "sd_send_scsi_READ_CAPACITY_16 received invalid value "
20439 20437 "capacity %llu lbasize %d pbsize %d", capacity, lbasize);
20440 20438 return (EIO);
20441 20439 }
20442 20440
20443 20441 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20444 20442 return (0);
20445 20443 }
20446 20444
20447 20445
20448 20446 /*
20449 20447 * Function: sd_send_scsi_START_STOP_UNIT
20450 20448 *
20451 20449 * Description: Issue a scsi START STOP UNIT command to the target.
20452 20450 *
20453 20451 * Arguments: ssc - ssc contatins pointer to driver soft state (unit)
20454 20452 * structure for this target.
20455 20453 * pc_flag - SD_POWER_CONDITION
20456 20454 * SD_START_STOP
20457 20455 * flag - SD_TARGET_START
20458 20456 * SD_TARGET_STOP
20459 20457 * SD_TARGET_EJECT
20460 20458 * SD_TARGET_CLOSE
20461 20459 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20462 20460 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20463 20461 * to use the USCSI "direct" chain and bypass the normal
20464 20462 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20465 20463 * command is issued as part of an error recovery action.
20466 20464 *
20467 20465 * Return Code: 0 - Success
20468 20466 * EIO - IO error
20469 20467 * EACCES - Reservation conflict detected
20470 20468 * ENXIO - Not Ready, medium not present
20471 20469 * errno return code from sd_ssc_send()
20472 20470 *
20473 20471 * Context: Can sleep.
20474 20472 */
20475 20473
20476 20474 static int
20477 20475 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag,
20478 20476 int path_flag)
20479 20477 {
20480 20478 struct scsi_extended_sense sense_buf;
20481 20479 union scsi_cdb cdb;
20482 20480 struct uscsi_cmd ucmd_buf;
20483 20481 int status;
20484 20482 struct sd_lun *un;
20485 20483
20486 20484 ASSERT(ssc != NULL);
20487 20485 un = ssc->ssc_un;
20488 20486 ASSERT(un != NULL);
20489 20487 ASSERT(!mutex_owned(SD_MUTEX(un)));
20490 20488
20491 20489 SD_TRACE(SD_LOG_IO, un,
20492 20490 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
20493 20491
20494 20492 if (un->un_f_check_start_stop &&
20495 20493 (pc_flag == SD_START_STOP) &&
20496 20494 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
20497 20495 (un->un_f_start_stop_supported != TRUE)) {
20498 20496 return (0);
20499 20497 }
20500 20498
20501 20499 /*
20502 20500 * If we are performing an eject operation and
20503 20501 * we receive any command other than SD_TARGET_EJECT
20504 20502 * we should immediately return.
20505 20503 */
20506 20504 if (flag != SD_TARGET_EJECT) {
20507 20505 mutex_enter(SD_MUTEX(un));
20508 20506 if (un->un_f_ejecting == TRUE) {
20509 20507 mutex_exit(SD_MUTEX(un));
20510 20508 return (EAGAIN);
20511 20509 }
20512 20510 mutex_exit(SD_MUTEX(un));
20513 20511 }
20514 20512
20515 20513 bzero(&cdb, sizeof (cdb));
20516 20514 bzero(&ucmd_buf, sizeof (ucmd_buf));
20517 20515 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20518 20516
20519 20517 cdb.scc_cmd = SCMD_START_STOP;
20520 20518 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
20521 20519 (uchar_t)(flag << 4) : (uchar_t)flag;
20522 20520
20523 20521 ucmd_buf.uscsi_cdb = (char *)&cdb;
20524 20522 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20525 20523 ucmd_buf.uscsi_bufaddr = NULL;
20526 20524 ucmd_buf.uscsi_buflen = 0;
20527 20525 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20528 20526 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20529 20527 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20530 20528 ucmd_buf.uscsi_timeout = 200;
20531 20529
20532 20530 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20533 20531 UIO_SYSSPACE, path_flag);
20534 20532
20535 20533 switch (status) {
20536 20534 case 0:
20537 20535 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20538 20536 break; /* Success! */
20539 20537 case EIO:
20540 20538 switch (ucmd_buf.uscsi_status) {
20541 20539 case STATUS_RESERVATION_CONFLICT:
20542 20540 status = EACCES;
20543 20541 break;
20544 20542 case STATUS_CHECK:
20545 20543 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) {
20546 20544 switch (scsi_sense_key(
20547 20545 (uint8_t *)&sense_buf)) {
20548 20546 case KEY_ILLEGAL_REQUEST:
20549 20547 status = ENOTSUP;
20550 20548 break;
20551 20549 case KEY_NOT_READY:
20552 20550 if (scsi_sense_asc(
20553 20551 (uint8_t *)&sense_buf)
20554 20552 == 0x3A) {
20555 20553 status = ENXIO;
20556 20554 }
20557 20555 break;
20558 20556 default:
20559 20557 break;
20560 20558 }
20561 20559 }
20562 20560 break;
20563 20561 default:
20564 20562 break;
20565 20563 }
20566 20564 break;
20567 20565 default:
20568 20566 break;
20569 20567 }
20570 20568
20571 20569 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n");
20572 20570
20573 20571 return (status);
20574 20572 }
20575 20573
20576 20574
20577 20575 /*
20578 20576 * Function: sd_start_stop_unit_callback
20579 20577 *
20580 20578 * Description: timeout(9F) callback to begin recovery process for a
20581 20579 * device that has spun down.
20582 20580 *
20583 20581 * Arguments: arg - pointer to associated softstate struct.
20584 20582 *
20585 20583 * Context: Executes in a timeout(9F) thread context
20586 20584 */
20587 20585
20588 20586 static void
20589 20587 sd_start_stop_unit_callback(void *arg)
20590 20588 {
20591 20589 struct sd_lun *un = arg;
20592 20590 ASSERT(un != NULL);
20593 20591 ASSERT(!mutex_owned(SD_MUTEX(un)));
20594 20592
20595 20593 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");
20596 20594
20597 20595 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
20598 20596 }
20599 20597
20600 20598
20601 20599 /*
20602 20600 * Function: sd_start_stop_unit_task
20603 20601 *
20604 20602 * Description: Recovery procedure when a drive is spun down.
20605 20603 *
20606 20604 * Arguments: arg - pointer to associated softstate struct.
20607 20605 *
20608 20606 * Context: Executes in a taskq() thread context
20609 20607 */
20610 20608
20611 20609 static void
20612 20610 sd_start_stop_unit_task(void *arg)
20613 20611 {
20614 20612 struct sd_lun *un = arg;
20615 20613 sd_ssc_t *ssc;
20616 20614 int power_level;
20617 20615 int rval;
20618 20616
20619 20617 ASSERT(un != NULL);
20620 20618 ASSERT(!mutex_owned(SD_MUTEX(un)));
20621 20619
20622 20620 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");
20623 20621
20624 20622 /*
20625 20623 * Some unformatted drives report not ready error, no need to
20626 20624 * restart if format has been initiated.
20627 20625 */
20628 20626 mutex_enter(SD_MUTEX(un));
20629 20627 if (un->un_f_format_in_progress == TRUE) {
20630 20628 mutex_exit(SD_MUTEX(un));
20631 20629 return;
20632 20630 }
20633 20631 mutex_exit(SD_MUTEX(un));
20634 20632
20635 20633 ssc = sd_ssc_init(un);
20636 20634 /*
20637 20635 * When a START STOP command is issued from here, it is part of a
20638 20636 * failure recovery operation and must be issued before any other
20639 20637 * commands, including any pending retries. Thus it must be sent
20640 20638 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up
20641 20639 * succeeds or not, we will start I/O after the attempt.
20642 20640 * If power condition is supported and the current power level
20643 20641 * is capable of performing I/O, we should set the power condition
20644 20642 * to that level. Otherwise, set the power condition to ACTIVE.
20645 20643 */
20646 20644 if (un->un_f_power_condition_supported) {
20647 20645 mutex_enter(SD_MUTEX(un));
20648 20646 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level));
20649 20647 power_level = sd_pwr_pc.ran_perf[un->un_power_level]
20650 20648 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE;
20651 20649 mutex_exit(SD_MUTEX(un));
20652 20650 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
20653 20651 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY);
20654 20652 } else {
20655 20653 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
20656 20654 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
20657 20655 }
20658 20656
20659 20657 if (rval != 0)
20660 20658 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20661 20659 sd_ssc_fini(ssc);
20662 20660 /*
20663 20661 * The above call blocks until the START_STOP_UNIT command completes.
20664 20662 * Now that it has completed, we must re-try the original IO that
20665 20663 * received the NOT READY condition in the first place. There are
20666 20664 * three possible conditions here:
20667 20665 *
20668 20666 * (1) The original IO is on un_retry_bp.
20669 20667 * (2) The original IO is on the regular wait queue, and un_retry_bp
20670 20668 * is NULL.
20671 20669 * (3) The original IO is on the regular wait queue, and un_retry_bp
20672 20670 * points to some other, unrelated bp.
20673 20671 *
20674 20672 * For each case, we must call sd_start_cmds() with un_retry_bp
20675 20673 * as the argument. If un_retry_bp is NULL, this will initiate
20676 20674 * processing of the regular wait queue. If un_retry_bp is not NULL,
20677 20675 * then this will process the bp on un_retry_bp. That may or may not
20678 20676 * be the original IO, but that does not matter: the important thing
20679 20677 * is to keep the IO processing going at this point.
20680 20678 *
20681 20679 * Note: This is a very specific error recovery sequence associated
20682 20680 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
20683 20681 * serialize the I/O with completion of the spin-up.
20684 20682 */
20685 20683 mutex_enter(SD_MUTEX(un));
20686 20684 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
20687 20685 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
20688 20686 un, un->un_retry_bp);
20689 20687 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */
20690 20688 sd_start_cmds(un, un->un_retry_bp);
20691 20689 mutex_exit(SD_MUTEX(un));
20692 20690
20693 20691 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
20694 20692 }
20695 20693
20696 20694
20697 20695 /*
20698 20696 * Function: sd_send_scsi_INQUIRY
20699 20697 *
20700 20698 * Description: Issue the scsi INQUIRY command.
20701 20699 *
20702 20700 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20703 20701 * structure for this target.
20704 20702 * bufaddr
20705 20703 * buflen
20706 20704 * evpd
20707 20705 * page_code
20708 20706 * page_length
20709 20707 *
20710 20708 * Return Code: 0 - Success
20711 20709 * errno return code from sd_ssc_send()
20712 20710 *
20713 20711 * Context: Can sleep. Does not return until command is completed.
20714 20712 */
20715 20713
20716 20714 static int
20717 20715 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
20718 20716 uchar_t evpd, uchar_t page_code, size_t *residp)
20719 20717 {
20720 20718 union scsi_cdb cdb;
20721 20719 struct uscsi_cmd ucmd_buf;
20722 20720 int status;
20723 20721 struct sd_lun *un;
20724 20722
20725 20723 ASSERT(ssc != NULL);
20726 20724 un = ssc->ssc_un;
20727 20725 ASSERT(un != NULL);
20728 20726 ASSERT(!mutex_owned(SD_MUTEX(un)));
20729 20727 ASSERT(bufaddr != NULL);
20730 20728
20731 20729 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
20732 20730
20733 20731 bzero(&cdb, sizeof (cdb));
20734 20732 bzero(&ucmd_buf, sizeof (ucmd_buf));
20735 20733 bzero(bufaddr, buflen);
20736 20734
20737 20735 cdb.scc_cmd = SCMD_INQUIRY;
20738 20736 cdb.cdb_opaque[1] = evpd;
20739 20737 cdb.cdb_opaque[2] = page_code;
20740 20738 FORMG0COUNT(&cdb, buflen);
20741 20739
20742 20740 ucmd_buf.uscsi_cdb = (char *)&cdb;
20743 20741 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20744 20742 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20745 20743 ucmd_buf.uscsi_buflen = buflen;
20746 20744 ucmd_buf.uscsi_rqbuf = NULL;
20747 20745 ucmd_buf.uscsi_rqlen = 0;
20748 20746 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
20749 20747 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
20750 20748
20751 20749 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20752 20750 UIO_SYSSPACE, SD_PATH_DIRECT);
20753 20751
20754 20752 /*
20755 20753 * Only handle status == 0, the upper-level caller
20756 20754 * will put different assessment based on the context.
20757 20755 */
20758 20756 if (status == 0)
20759 20757 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20760 20758
20761 20759 if ((status == 0) && (residp != NULL)) {
20762 20760 *residp = ucmd_buf.uscsi_resid;
20763 20761 }
20764 20762
20765 20763 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
20766 20764
20767 20765 return (status);
20768 20766 }
20769 20767
20770 20768
20771 20769 /*
20772 20770 * Function: sd_send_scsi_TEST_UNIT_READY
20773 20771 *
20774 20772 * Description: Issue the scsi TEST UNIT READY command.
20775 20773 * This routine can be told to set the flag USCSI_DIAGNOSE to
20776 20774 * prevent retrying failed commands. Use this when the intent
20777 20775 * is either to check for device readiness, to clear a Unit
20778 20776 * Attention, or to clear any outstanding sense data.
20779 20777 * However under specific conditions the expected behavior
20780 20778 * is for retries to bring a device ready, so use the flag
20781 20779 * with caution.
20782 20780 *
20783 20781 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20784 20782 * structure for this target.
20785 20783 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
20786 20784 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
20787 20785 * 0: dont check for media present, do retries on cmd.
20788 20786 *
20789 20787 * Return Code: 0 - Success
20790 20788 * EIO - IO error
20791 20789 * EACCES - Reservation conflict detected
20792 20790 * ENXIO - Not Ready, medium not present
20793 20791 * errno return code from sd_ssc_send()
20794 20792 *
20795 20793 * Context: Can sleep. Does not return until command is completed.
20796 20794 */
20797 20795
20798 20796 static int
20799 20797 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
20800 20798 {
20801 20799 struct scsi_extended_sense sense_buf;
20802 20800 union scsi_cdb cdb;
20803 20801 struct uscsi_cmd ucmd_buf;
20804 20802 int status;
20805 20803 struct sd_lun *un;
20806 20804
20807 20805 ASSERT(ssc != NULL);
20808 20806 un = ssc->ssc_un;
20809 20807 ASSERT(un != NULL);
20810 20808 ASSERT(!mutex_owned(SD_MUTEX(un)));
20811 20809
20812 20810 SD_TRACE(SD_LOG_IO, un,
20813 20811 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
20814 20812
20815 20813 /*
20816 20814 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
20817 20815 * timeouts when they receive a TUR and the queue is not empty. Check
20818 20816 * the configuration flag set during attach (indicating the drive has
20819 20817 * this firmware bug) and un_ncmds_in_transport before issuing the
20820 20818 * TUR. If there are
20821 20819 * pending commands return success, this is a bit arbitrary but is ok
20822 20820 * for non-removables (i.e. the eliteI disks) and non-clustering
20823 20821 * configurations.
20824 20822 */
20825 20823 if (un->un_f_cfg_tur_check == TRUE) {
20826 20824 mutex_enter(SD_MUTEX(un));
20827 20825 if (un->un_ncmds_in_transport != 0) {
20828 20826 mutex_exit(SD_MUTEX(un));
20829 20827 return (0);
20830 20828 }
20831 20829 mutex_exit(SD_MUTEX(un));
20832 20830 }
20833 20831
20834 20832 bzero(&cdb, sizeof (cdb));
20835 20833 bzero(&ucmd_buf, sizeof (ucmd_buf));
20836 20834 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20837 20835
20838 20836 cdb.scc_cmd = SCMD_TEST_UNIT_READY;
20839 20837
20840 20838 ucmd_buf.uscsi_cdb = (char *)&cdb;
20841 20839 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20842 20840 ucmd_buf.uscsi_bufaddr = NULL;
20843 20841 ucmd_buf.uscsi_buflen = 0;
20844 20842 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20845 20843 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20846 20844 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20847 20845
20848 20846 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20849 20847 if ((flag & SD_DONT_RETRY_TUR) != 0) {
20850 20848 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20851 20849 }
20852 20850 ucmd_buf.uscsi_timeout = 60;
20853 20851
20854 20852 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20855 20853 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20856 20854 SD_PATH_STANDARD));
20857 20855
20858 20856 switch (status) {
20859 20857 case 0:
20860 20858 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20861 20859 break; /* Success! */
20862 20860 case EIO:
20863 20861 switch (ucmd_buf.uscsi_status) {
20864 20862 case STATUS_RESERVATION_CONFLICT:
20865 20863 status = EACCES;
20866 20864 break;
20867 20865 case STATUS_CHECK:
20868 20866 if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
20869 20867 break;
20870 20868 }
20871 20869 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20872 20870 (scsi_sense_key((uint8_t *)&sense_buf) ==
20873 20871 KEY_NOT_READY) &&
20874 20872 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
20875 20873 status = ENXIO;
20876 20874 }
20877 20875 break;
20878 20876 default:
20879 20877 break;
20880 20878 }
20881 20879 break;
20882 20880 default:
20883 20881 break;
20884 20882 }
20885 20883
20886 20884 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20887 20885
20888 20886 return (status);
20889 20887 }
20890 20888
20891 20889 /*
20892 20890 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20893 20891 *
20894 20892 * Description: Issue the scsi PERSISTENT RESERVE IN command.
20895 20893 *
20896 20894 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20897 20895 * structure for this target.
20898 20896 *
20899 20897 * Return Code: 0 - Success
20900 20898 * EACCES
20901 20899 * ENOTSUP
20902 20900 * errno return code from sd_ssc_send()
20903 20901 *
20904 20902 * Context: Can sleep. Does not return until command is completed.
20905 20903 */
20906 20904
20907 20905 static int
20908 20906 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
20909 20907 uint16_t data_len, uchar_t *data_bufp)
20910 20908 {
20911 20909 struct scsi_extended_sense sense_buf;
20912 20910 union scsi_cdb cdb;
20913 20911 struct uscsi_cmd ucmd_buf;
20914 20912 int status;
20915 20913 int no_caller_buf = FALSE;
20916 20914 struct sd_lun *un;
20917 20915
20918 20916 ASSERT(ssc != NULL);
20919 20917 un = ssc->ssc_un;
20920 20918 ASSERT(un != NULL);
20921 20919 ASSERT(!mutex_owned(SD_MUTEX(un)));
20922 20920 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
20923 20921
20924 20922 SD_TRACE(SD_LOG_IO, un,
20925 20923 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
20926 20924
20927 20925 bzero(&cdb, sizeof (cdb));
20928 20926 bzero(&ucmd_buf, sizeof (ucmd_buf));
20929 20927 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20930 20928 if (data_bufp == NULL) {
20931 20929 /* Allocate a default buf if the caller did not give one */
20932 20930 ASSERT(data_len == 0);
20933 20931 data_len = MHIOC_RESV_KEY_SIZE;
20934 20932 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
20935 20933 no_caller_buf = TRUE;
20936 20934 }
20937 20935
20938 20936 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
20939 20937 cdb.cdb_opaque[1] = usr_cmd;
20940 20938 FORMG1COUNT(&cdb, data_len);
20941 20939
20942 20940 ucmd_buf.uscsi_cdb = (char *)&cdb;
20943 20941 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20944 20942 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
20945 20943 ucmd_buf.uscsi_buflen = data_len;
20946 20944 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20947 20945 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20948 20946 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20949 20947 ucmd_buf.uscsi_timeout = 60;
20950 20948
20951 20949 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20952 20950 UIO_SYSSPACE, SD_PATH_STANDARD);
20953 20951
20954 20952 switch (status) {
20955 20953 case 0:
20956 20954 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20957 20955
20958 20956 break; /* Success! */
20959 20957 case EIO:
20960 20958 switch (ucmd_buf.uscsi_status) {
20961 20959 case STATUS_RESERVATION_CONFLICT:
20962 20960 status = EACCES;
20963 20961 break;
20964 20962 case STATUS_CHECK:
20965 20963 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20966 20964 (scsi_sense_key((uint8_t *)&sense_buf) ==
20967 20965 KEY_ILLEGAL_REQUEST)) {
20968 20966 status = ENOTSUP;
20969 20967 }
20970 20968 break;
20971 20969 default:
20972 20970 break;
20973 20971 }
20974 20972 break;
20975 20973 default:
20976 20974 break;
20977 20975 }
20978 20976
20979 20977 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
20980 20978
20981 20979 if (no_caller_buf == TRUE) {
20982 20980 kmem_free(data_bufp, data_len);
20983 20981 }
20984 20982
20985 20983 return (status);
20986 20984 }
20987 20985
20988 20986
/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling
 *		multi-host persistent reservation requests by sending the
 *		SCSI-3 PROUT commands to the device.  The service action is
 *		selected by usr_cmd and the 24-byte parameter list is built
 *		from the caller-supplied descriptor.
 *
 * Arguments: ssc - ssc contains un - pointer to soft state struct
 *		    for the target.
 *	      usr_cmd SCSI-3 reservation facility command (one of
 *		    SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *		    SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR,
 *		    SD_SCSI3_REGISTERANDIGNOREKEY)
 *	      usr_bufp - user provided pointer register, reserve descriptor or
 *		    preempt and abort structure (mhioc_register_t,
 *		    mhioc_resv_desc_t, mhioc_preemptandabort_t)
 *
 * Return Code: 0 - Success
 *		EACCES
 *		ENOTSUP
 *		EINVAL - usr_bufp is NULL
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
    uchar_t *usr_bufp)
{
	struct scsi_extended_sense sense_buf;
	union scsi_cdb cdb;
	struct uscsi_cmd ucmd_buf;
	int status;
	uchar_t data_len = sizeof (sd_prout_t);
	sd_prout_t *prp;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(data_len == 24);	/* required by scsi spec */

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);

	if (usr_bufp == NULL) {
		return (EINVAL);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	/* Parameter list sent with the PROUT command; freed before return. */
	prp = kmem_zalloc(data_len, KM_SLEEP);

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
	cdb.cdb_opaque[1] = usr_cmd;	/* PROUT service action */
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)prp;
	ucmd_buf.uscsi_buflen = data_len;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	/*
	 * Fill in the parameter list (and, where applicable, CDB byte 2 -
	 * the scope/type field) from the caller's descriptor according to
	 * the requested service action.
	 */
	switch (usr_cmd) {
	case SD_SCSI3_REGISTER: {
		mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;

		/* Old key identifies us; new key is the one to register. */
		bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->newkey.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	case SD_SCSI3_CLEAR: {
		mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;

		bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		break;
	}
	case SD_SCSI3_RESERVE:
	case SD_SCSI3_RELEASE: {
		mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;

		bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		/* Scope-specific address is big-endian on the wire. */
		prp->scope_address = BE_32(ptr->scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->type;
		break;
	}
	case SD_SCSI3_PREEMPTANDABORT: {
		mhioc_preemptandabort_t *ptr =
		    (mhioc_preemptandabort_t *)usr_bufp;

		bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->victim_key.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->resvdesc.type;
		/* Preempt-and-abort jumps the queue ahead of other cmds. */
		ucmd_buf.uscsi_flags |= USCSI_HEAD;
		break;
	}
	case SD_SCSI3_REGISTERANDIGNOREKEY:
	{
		mhioc_registerandignorekey_t *ptr;
		ptr = (mhioc_registerandignorekey_t *)usr_bufp;
		/* Existing key is ignored; only the new key is sent. */
		bcopy(ptr->newkey.key,
		    prp->service_key, MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	default:
		/* Callers only pass the service actions handled above. */
		ASSERT(FALSE);
		break;
	}

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/* ILLEGAL REQUEST: persistent reserve unsupported. */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	kmem_free(prp, data_len);
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
	return (status);
}
21139 21137
21140 21138
/*
 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
 *
 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
 *
 * Arguments: un - pointer to the target's soft state struct
 *		dkc - pointer to the callback structure
 *
 * Return Code: 0 - success
 *		errno-type error code
 *
 * Context: kernel thread context only.
 *
 * _______________________________________________________________
 * | dkc_flag &   | dkc_callback |   DKIOCFLUSHWRITECACHE          |
 * |FLUSH_VOLATILE|              |   operation                     |
 * |______________|______________|_________________________________|
 * | 0            |   NULL       | Synchronous flush on both       |
 * |              |              | volatile and non-volatile cache |
 * |______________|______________|_________________________________|
 * | 1            |   NULL       | Synchronous flush on volatile   |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 * | 0            |   !NULL      | Asynchronous flush on both      |
 * |              |              | volatile and non-volatile cache;|
 * |______________|______________|_________________________________|
 * | 1            |   !NULL      | Asynchronous flush on volatile  |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 *
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
{
	struct sd_uscsi_info	*uip;
	struct uscsi_cmd	*uscmd;
	union scsi_cdb		*cdb;
	struct buf		*bp;
	int			rval = 0;
	int			is_async;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* The request is asynchronous iff the caller supplied a callback. */
	if (dkc == NULL || dkc->dkc_callback == NULL) {
		is_async = FALSE;
	} else {
		is_async = TRUE;
	}

	mutex_enter(SD_MUTEX(un));
	/* check whether cache flush should be suppressed */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * suppress the cache flush if the device is told to do
		 * so by sd.conf or disk table
		 */
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \
		    skip the cache flush since suppress_cache_flush is %d!\n",
		    un->un_f_suppress_cache_flush);

		if (is_async == TRUE) {
			/* invoke callback for asynchronous flush */
			(*dkc->dkc_callback)(dkc->dkc_cookie, 0);
		}
		return (rval);
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
	 * set properly
	 */
	cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
	cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;

	mutex_enter(SD_MUTEX(un));
	if (dkc != NULL && un->un_f_sync_nv_supported &&
	    (dkc->dkc_flag & FLUSH_VOLATILE)) {
		/*
		 * if the device supports SYNC_NV bit, turn on
		 * the SYNC_NV bit to only flush volatile cache
		 */
		cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * First get some memory for the uscsi_cmd struct and cdb
	 * and initialize for SYNCHRONIZE_CACHE cmd.
	 */
	uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
	uscmd->uscsi_cdblen = CDB_GROUP1;
	uscmd->uscsi_cdb = (caddr_t)cdb;
	uscmd->uscsi_bufaddr = NULL;
	uscmd->uscsi_buflen = 0;
	uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	uscmd->uscsi_rqlen = SENSE_LENGTH;
	uscmd->uscsi_rqresid = SENSE_LENGTH;
	uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	uscmd->uscsi_timeout = sd_io_time;

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi().  Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi().  Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = SD_PATH_DIRECT;
	uip->ui_cmdp = uscmd;

	bp = getrbuf(KM_SLEEP);
	bp->b_private = uip;

	/*
	 * Setup buffer to carry uscsi request.
	 */
	bp->b_flags = B_BUSY;
	bp->b_bcount = 0;
	bp->b_blkno = 0;

	if (is_async == TRUE) {
		/*
		 * Async: the biodone routine runs from the b_iodone hook
		 * and needs its own copy of the callback structure, since
		 * the caller's dkc may not outlive this call.
		 */
		bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
		uip->ui_dkc = *dkc;
	}

	bp->b_edev = SD_GET_DEV(un);
	bp->b_dev = cmpdev(bp->b_edev);	/* maybe unnecessary? */

	/*
	 * Unset un_f_sync_cache_required flag
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_f_sync_cache_required = FALSE;
	mutex_exit(SD_MUTEX(un));

	(void) sd_uscsi_strategy(bp);

	/*
	 * If synchronous request, wait for completion
	 * If async just return and let b_iodone callback
	 * cleanup.
	 * NOTE: On return, u_ncmds_in_driver will be decremented,
	 * but it was also incremented in sd_uscsi_strategy(), so
	 * we should be ok.
	 */
	if (is_async == FALSE) {
		(void) biowait(bp);
		/* Biodone frees bp, uip, uscmd and decodes the status. */
		rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
	}

	return (rval);
}
21306 21304
21307 21305
/*
 * Function: sd_send_scsi_SYNCHRONIZE_CACHE_biodone
 *
 * Description: Completion routine for SYNCHRONIZE CACHE requests issued by
 *		sd_send_scsi_SYNCHRONIZE_CACHE().  Decodes the command status,
 *		downgrades the un_f_sync_nv_supported / un_f_sync_cache_supported
 *		flags when the target rejects the command, invokes the caller's
 *		callback (async case), and frees the buf, sd_uscsi_info,
 *		uscsi_cmd, sense buffer and CDB that were allocated for the
 *		request.
 *
 * Arguments: bp - buf carrying the completed uscsi request; b_private
 *		holds the sd_uscsi_info set up by the issuing routine.
 *
 * Return Code: 0 - success (including ignored reservation conflict)
 *		ENOTSUP - target does not support SYNCHRONIZE CACHE
 *		errno-type error code from geterror(bp)
 */
static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
	struct sd_uscsi_info *uip;
	struct uscsi_cmd *uscmd;
	uint8_t *sense_buf;
	struct sd_lun *un;
	int status;
	union scsi_cdb *cdb;

	uip = (struct sd_uscsi_info *)(bp->b_private);
	ASSERT(uip != NULL);

	uscmd = uip->ui_cmdp;
	ASSERT(uscmd != NULL);

	sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
	ASSERT(sense_buf != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	ASSERT(un != NULL);

	cdb = (union scsi_cdb *)uscmd->uscsi_cdb;

	status = geterror(bp);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (uscmd->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key(sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) {
					/*
					 * The SYNC_NV variant was rejected;
					 * remember that so future flushes
					 * fall back to the plain command.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_f_sync_nv_supported = FALSE;
					mutex_exit(SD_MUTEX(un));
					status = 0;
					SD_TRACE(SD_LOG_IO, un,
					    "un_f_sync_nv_supported \
					    is set to false.\n");
					goto done;
				}

				/* Plain SYNC CACHE rejected: unsupported. */
				mutex_enter(SD_MUTEX(un));
				un->un_f_sync_cache_supported = FALSE;
				mutex_exit(SD_MUTEX(un));
				SD_TRACE(SD_LOG_IO, un,
				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
				    un_f_sync_cache_supported set to false \
				    with asc = %x, ascq = %x\n",
				    scsi_sense_asc(sense_buf),
				    scsi_sense_ascq(sense_buf));
				status = ENOTSUP;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/*
		 * Turn on the un_f_sync_cache_required flag
		 * since the SYNC CACHE command failed
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_f_sync_cache_required = TRUE;
		mutex_exit(SD_MUTEX(un));

		/*
		 * Don't log an error message if this device
		 * has removable media.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		}
		break;
	}

done:
	/* Async requests carry the caller's callback in ui_dkc. */
	if (uip->ui_dkc.dkc_callback != NULL) {
		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
	}

	/* Release everything allocated by the issuing routine. */
	ASSERT((bp->b_flags & B_REMAPPED) == 0);
	freerbuf(bp);
	kmem_free(uip, sizeof (struct sd_uscsi_info));
	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
	kmem_free(uscmd, sizeof (struct uscsi_cmd));

	return (status);
}
21410 21408
21411 21409
21412 21410 /*
21413 21411 * Function: sd_send_scsi_GET_CONFIGURATION
21414 21412 *
21415 21413 * Description: Issues the get configuration command to the device.
21416 21414 * Called from sd_check_for_writable_cd & sd_get_media_info
21417 21415 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN
21418 21416 * Arguments: ssc
21419 21417 * ucmdbuf
21420 21418 * rqbuf
21421 21419 * rqbuflen
21422 21420 * bufaddr
21423 21421 * buflen
21424 21422 * path_flag
21425 21423 *
21426 21424 * Return Code: 0 - Success
21427 21425 * errno return code from sd_ssc_send()
21428 21426 *
21429 21427 * Context: Can sleep. Does not return until command is completed.
21430 21428 *
21431 21429 */
21432 21430
21433 21431 static int
21434 21432 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
21435 21433 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
21436 21434 int path_flag)
21437 21435 {
21438 21436 char cdb[CDB_GROUP1];
21439 21437 int status;
21440 21438 struct sd_lun *un;
21441 21439
21442 21440 ASSERT(ssc != NULL);
21443 21441 un = ssc->ssc_un;
21444 21442 ASSERT(un != NULL);
21445 21443 ASSERT(!mutex_owned(SD_MUTEX(un)));
21446 21444 ASSERT(bufaddr != NULL);
21447 21445 ASSERT(ucmdbuf != NULL);
21448 21446 ASSERT(rqbuf != NULL);
21449 21447
21450 21448 SD_TRACE(SD_LOG_IO, un,
21451 21449 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
21452 21450
21453 21451 bzero(cdb, sizeof (cdb));
21454 21452 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21455 21453 bzero(rqbuf, rqbuflen);
21456 21454 bzero(bufaddr, buflen);
21457 21455
21458 21456 /*
21459 21457 * Set up cdb field for the get configuration command.
21460 21458 */
21461 21459 cdb[0] = SCMD_GET_CONFIGURATION;
21462 21460 cdb[1] = 0x02; /* Requested Type */
21463 21461 cdb[8] = SD_PROFILE_HEADER_LEN;
21464 21462 ucmdbuf->uscsi_cdb = cdb;
21465 21463 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21466 21464 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21467 21465 ucmdbuf->uscsi_buflen = buflen;
21468 21466 ucmdbuf->uscsi_timeout = sd_io_time;
21469 21467 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21470 21468 ucmdbuf->uscsi_rqlen = rqbuflen;
21471 21469 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21472 21470
21473 21471 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21474 21472 UIO_SYSSPACE, path_flag);
21475 21473
21476 21474 switch (status) {
21477 21475 case 0:
21478 21476 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21479 21477 break; /* Success! */
21480 21478 case EIO:
21481 21479 switch (ucmdbuf->uscsi_status) {
21482 21480 case STATUS_RESERVATION_CONFLICT:
21483 21481 status = EACCES;
21484 21482 break;
21485 21483 default:
21486 21484 break;
21487 21485 }
21488 21486 break;
21489 21487 default:
21490 21488 break;
21491 21489 }
21492 21490
21493 21491 if (status == 0) {
21494 21492 SD_DUMP_MEMORY(un, SD_LOG_IO,
21495 21493 "sd_send_scsi_GET_CONFIGURATION: data",
21496 21494 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21497 21495 }
21498 21496
21499 21497 SD_TRACE(SD_LOG_IO, un,
21500 21498 "sd_send_scsi_GET_CONFIGURATION: exit\n");
21501 21499
21502 21500 return (status);
21503 21501 }
21504 21502
21505 21503 /*
21506 21504 * Function: sd_send_scsi_feature_GET_CONFIGURATION
21507 21505 *
21508 21506 * Description: Issues the get configuration command to the device to
21509 21507 * retrieve a specific feature. Called from
21510 21508 * sd_check_for_writable_cd & sd_set_mmc_caps.
21511 21509 * Arguments: ssc
21512 21510 * ucmdbuf
21513 21511 * rqbuf
21514 21512 * rqbuflen
21515 21513 * bufaddr
21516 21514 * buflen
21517 21515 * feature
21518 21516 *
21519 21517 * Return Code: 0 - Success
21520 21518 * errno return code from sd_ssc_send()
21521 21519 *
21522 21520 * Context: Can sleep. Does not return until command is completed.
21523 21521 *
21524 21522 */
21525 21523 static int
21526 21524 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
21527 21525 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
21528 21526 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag)
21529 21527 {
21530 21528 char cdb[CDB_GROUP1];
21531 21529 int status;
21532 21530 struct sd_lun *un;
21533 21531
21534 21532 ASSERT(ssc != NULL);
21535 21533 un = ssc->ssc_un;
21536 21534 ASSERT(un != NULL);
21537 21535 ASSERT(!mutex_owned(SD_MUTEX(un)));
21538 21536 ASSERT(bufaddr != NULL);
21539 21537 ASSERT(ucmdbuf != NULL);
21540 21538 ASSERT(rqbuf != NULL);
21541 21539
21542 21540 SD_TRACE(SD_LOG_IO, un,
21543 21541 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);
21544 21542
21545 21543 bzero(cdb, sizeof (cdb));
21546 21544 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21547 21545 bzero(rqbuf, rqbuflen);
21548 21546 bzero(bufaddr, buflen);
21549 21547
21550 21548 /*
21551 21549 * Set up cdb field for the get configuration command.
21552 21550 */
21553 21551 cdb[0] = SCMD_GET_CONFIGURATION;
21554 21552 cdb[1] = 0x02; /* Requested Type */
21555 21553 cdb[3] = feature;
21556 21554 cdb[8] = buflen;
21557 21555 ucmdbuf->uscsi_cdb = cdb;
21558 21556 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21559 21557 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21560 21558 ucmdbuf->uscsi_buflen = buflen;
21561 21559 ucmdbuf->uscsi_timeout = sd_io_time;
21562 21560 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21563 21561 ucmdbuf->uscsi_rqlen = rqbuflen;
21564 21562 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21565 21563
21566 21564 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21567 21565 UIO_SYSSPACE, path_flag);
21568 21566
21569 21567 switch (status) {
21570 21568 case 0:
21571 21569
21572 21570 break; /* Success! */
21573 21571 case EIO:
21574 21572 switch (ucmdbuf->uscsi_status) {
21575 21573 case STATUS_RESERVATION_CONFLICT:
21576 21574 status = EACCES;
21577 21575 break;
21578 21576 default:
21579 21577 break;
21580 21578 }
21581 21579 break;
21582 21580 default:
21583 21581 break;
21584 21582 }
21585 21583
21586 21584 if (status == 0) {
21587 21585 SD_DUMP_MEMORY(un, SD_LOG_IO,
21588 21586 "sd_send_scsi_feature_GET_CONFIGURATION: data",
21589 21587 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21590 21588 }
21591 21589
21592 21590 SD_TRACE(SD_LOG_IO, un,
21593 21591 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
21594 21592
21595 21593 return (status);
21596 21594 }
21597 21595
21598 21596
/*
 * Function: sd_send_scsi_MODE_SENSE
 *
 * Description: Utility function for issuing a scsi MODE SENSE command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
 *		CDB_GROUP[1|2] (10 byte).
 *		bufaddr - buffer for page data retrieved from the target.
 *		buflen - size of page to be retrieved.
 *		page_code - page code of data to be retrieved from the target.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
    size_t buflen, uchar_t page_code, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	int			headlen;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	bzero(bufaddr, buflen);

	/*
	 * Group0 uses the 6-byte MODE SENSE; Group1/2 use the 10-byte
	 * variant, which also implies the larger mode header.
	 */
	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH;
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH_GRP2;
	}

	ASSERT(headlen <= buflen);
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * sr_check_wp() uses 0x3f page code and check the header of
		 * mode page to determine if target device is write-protected.
		 * But some USB devices return 0 bytes for 0x3f page code. For
		 * this case, make sure that mode page header is returned at
		 * least.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen) {
			status = EIO;
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "mode page header is not returned");
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Reservation conflict maps to "permission denied". */
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}
21715 21713
21716 21714
21717 21715 /*
21718 21716 * Function: sd_send_scsi_MODE_SELECT
21719 21717 *
21720 21718 * Description: Utility function for issuing a scsi MODE SELECT command.
21721 21719 * Note: This routine uses a consistent implementation for Group0,
21722 21720 * Group1, and Group2 commands across all platforms. ATAPI devices
21723 21721 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select
21724 21722 *
21725 21723 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21726 21724 * structure for this target.
21727 21725 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
21728 21726 * CDB_GROUP[1|2] (10 byte).
21729 21727 * bufaddr - buffer for page data retrieved from the target.
21730 21728 * buflen - size of page to be retrieved.
21731 21729 * save_page - boolean to determin if SP bit should be set.
21732 21730 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21733 21731 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21734 21732 * to use the USCSI "direct" chain and bypass the normal
21735 21733 * command waitq.
21736 21734 *
21737 21735 * Return Code: 0 - Success
21738 21736 * errno return code from sd_ssc_send()
21739 21737 *
21740 21738 * Context: Can sleep. Does not return until command is completed.
21741 21739 */
21742 21740
21743 21741 static int
21744 21742 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21745 21743 size_t buflen, uchar_t save_page, int path_flag)
21746 21744 {
21747 21745 struct scsi_extended_sense sense_buf;
21748 21746 union scsi_cdb cdb;
21749 21747 struct uscsi_cmd ucmd_buf;
21750 21748 int status;
21751 21749 struct sd_lun *un;
21752 21750
21753 21751 ASSERT(ssc != NULL);
21754 21752 un = ssc->ssc_un;
21755 21753 ASSERT(un != NULL);
21756 21754 ASSERT(!mutex_owned(SD_MUTEX(un)));
21757 21755 ASSERT(bufaddr != NULL);
21758 21756 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21759 21757 (cdbsize == CDB_GROUP2));
21760 21758
21761 21759 SD_TRACE(SD_LOG_IO, un,
21762 21760 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
21763 21761
21764 21762 bzero(&cdb, sizeof (cdb));
21765 21763 bzero(&ucmd_buf, sizeof (ucmd_buf));
21766 21764 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21767 21765
21768 21766 /* Set the PF bit for many third party drives */
21769 21767 cdb.cdb_opaque[1] = 0x10;
21770 21768
21771 21769 /* Set the savepage(SP) bit if given */
21772 21770 if (save_page == SD_SAVE_PAGE) {
21773 21771 cdb.cdb_opaque[1] |= 0x01;
21774 21772 }
21775 21773
21776 21774 if (cdbsize == CDB_GROUP0) {
21777 21775 cdb.scc_cmd = SCMD_MODE_SELECT;
21778 21776 FORMG0COUNT(&cdb, buflen);
21779 21777 } else {
21780 21778 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
21781 21779 FORMG1COUNT(&cdb, buflen);
21782 21780 }
21783 21781
21784 21782 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21785 21783
21786 21784 ucmd_buf.uscsi_cdb = (char *)&cdb;
21787 21785 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21788 21786 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21789 21787 ucmd_buf.uscsi_buflen = buflen;
21790 21788 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21791 21789 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21792 21790 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21793 21791 ucmd_buf.uscsi_timeout = 60;
21794 21792
21795 21793 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21796 21794 UIO_SYSSPACE, path_flag);
21797 21795
21798 21796 switch (status) {
21799 21797 case 0:
21800 21798 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21801 21799 break; /* Success! */
21802 21800 case EIO:
21803 21801 switch (ucmd_buf.uscsi_status) {
21804 21802 case STATUS_RESERVATION_CONFLICT:
21805 21803 status = EACCES;
21806 21804 break;
21807 21805 default:
21808 21806 break;
21809 21807 }
21810 21808 break;
21811 21809 default:
21812 21810 break;
21813 21811 }
21814 21812
21815 21813 if (status == 0) {
21816 21814 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
21817 21815 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21818 21816 }
21819 21817 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
21820 21818
21821 21819 return (status);
21822 21820 }
21823 21821
21824 21822
/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		cmd: SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer to receive the RDWR data
 *		buflen: Length of caller's buffer receive the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *		(Assumes target-native block size.)
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	uint32_t		block_count;
	int			status;
	int			cdbsize;
	uchar_t			flag;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	/* Target block size must be known to convert buflen to blocks. */
	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/*
	 * Compute CDB size to use: LBAs beyond 32 bits need the 16-byte
	 * Group4 CDB; LBAs beyond the 6-byte CDB's 21-bit address field
	 * (or any ATAPI device) need the 10-byte Group1 CDB; anything
	 * else fits the 6-byte Group0 CDB.
	 */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:	/* 6-byte CDBs */
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:	/* 10-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:	/* 16-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:	/* 12-byte CDBs (currently unsupported) */
	default:
		/* All others reserved */
		return (EINVAL);
	}

	/* Set LUN bit(s) in CDB if this is a SCSI-1 device */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;
	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Reservation conflict maps to "permission denied". */
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}
21961 21959
21962 21960
21963 21961 /*
21964 21962 * Function: sd_send_scsi_LOG_SENSE
21965 21963 *
21966 21964 * Description: Issue a scsi LOG_SENSE command with the given parameters.
21967 21965 *
21968 21966 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21969 21967 * structure for this target.
21970 21968 *
21971 21969 * Return Code: 0 - Success
21972 21970 * errno return code from sd_ssc_send()
21973 21971 *
21974 21972 * Context: Can sleep. Does not return until command is completed.
21975 21973 */
21976 21974
21977 21975 static int
21978 21976 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
21979 21977 uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
21980 21978 int path_flag)
21981 21979
21982 21980 {
21983 21981 struct scsi_extended_sense sense_buf;
21984 21982 union scsi_cdb cdb;
21985 21983 struct uscsi_cmd ucmd_buf;
21986 21984 int status;
21987 21985 struct sd_lun *un;
21988 21986
21989 21987 ASSERT(ssc != NULL);
21990 21988 un = ssc->ssc_un;
21991 21989 ASSERT(un != NULL);
21992 21990 ASSERT(!mutex_owned(SD_MUTEX(un)));
21993 21991
21994 21992 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);
21995 21993
21996 21994 bzero(&cdb, sizeof (cdb));
21997 21995 bzero(&ucmd_buf, sizeof (ucmd_buf));
21998 21996 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21999 21997
22000 21998 cdb.scc_cmd = SCMD_LOG_SENSE_G1;
22001 21999 cdb.cdb_opaque[2] = (page_control << 6) | page_code;
22002 22000 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
22003 22001 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
22004 22002 FORMG1COUNT(&cdb, buflen);
22005 22003
22006 22004 ucmd_buf.uscsi_cdb = (char *)&cdb;
22007 22005 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
22008 22006 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
22009 22007 ucmd_buf.uscsi_buflen = buflen;
22010 22008 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
22011 22009 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
22012 22010 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
22013 22011 ucmd_buf.uscsi_timeout = 60;
22014 22012
22015 22013 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22016 22014 UIO_SYSSPACE, path_flag);
22017 22015
22018 22016 switch (status) {
22019 22017 case 0:
22020 22018 break;
22021 22019 case EIO:
22022 22020 switch (ucmd_buf.uscsi_status) {
22023 22021 case STATUS_RESERVATION_CONFLICT:
22024 22022 status = EACCES;
22025 22023 break;
22026 22024 case STATUS_CHECK:
22027 22025 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
22028 22026 (scsi_sense_key((uint8_t *)&sense_buf) ==
22029 22027 KEY_ILLEGAL_REQUEST) &&
22030 22028 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
22031 22029 /*
22032 22030 * ASC 0x24: INVALID FIELD IN CDB
22033 22031 */
22034 22032 switch (page_code) {
22035 22033 case START_STOP_CYCLE_PAGE:
22036 22034 /*
22037 22035 * The start stop cycle counter is
22038 22036 * implemented as page 0x31 in earlier
22039 22037 * generation disks. In new generation
22040 22038 * disks the start stop cycle counter is
22041 22039 * implemented as page 0xE. To properly
22042 22040 * handle this case if an attempt for
22043 22041 * log page 0xE is made and fails we
22044 22042 * will try again using page 0x31.
22045 22043 *
22046 22044 * Network storage BU committed to
22047 22045 * maintain the page 0x31 for this
22048 22046 * purpose and will not have any other
22049 22047 * page implemented with page code 0x31
22050 22048 * until all disks transition to the
22051 22049 * standard page.
22052 22050 */
22053 22051 mutex_enter(SD_MUTEX(un));
22054 22052 un->un_start_stop_cycle_page =
22055 22053 START_STOP_CYCLE_VU_PAGE;
22056 22054 cdb.cdb_opaque[2] =
22057 22055 (char)(page_control << 6) |
22058 22056 un->un_start_stop_cycle_page;
22059 22057 mutex_exit(SD_MUTEX(un));
22060 22058 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22061 22059 status = sd_ssc_send(
22062 22060 ssc, &ucmd_buf, FKIOCTL,
22063 22061 UIO_SYSSPACE, path_flag);
22064 22062
22065 22063 break;
22066 22064 case TEMPERATURE_PAGE:
22067 22065 status = ENOTTY;
22068 22066 break;
22069 22067 default:
22070 22068 break;
22071 22069 }
22072 22070 }
22073 22071 break;
22074 22072 default:
22075 22073 break;
22076 22074 }
22077 22075 break;
22078 22076 default:
22079 22077 break;
22080 22078 }
22081 22079
22082 22080 if (status == 0) {
22083 22081 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22084 22082 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
22085 22083 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
22086 22084 }
22087 22085
22088 22086 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
22089 22087
22090 22088 return (status);
22091 22089 }
22092 22090
22093 22091
22094 22092 /*
22095 22093 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
22096 22094 *
22097 22095 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command.
22098 22096 *
22099 22097 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
22100 22098 * structure for this target.
22101 22099 * bufaddr
22102 22100 * buflen
22103 22101 * class_req
22104 22102 *
22105 22103 * Return Code: 0 - Success
22106 22104 * errno return code from sd_ssc_send()
22107 22105 *
22108 22106 * Context: Can sleep. Does not return until command is completed.
22109 22107 */
22110 22108
22111 22109 static int
22112 22110 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr,
22113 22111 size_t buflen, uchar_t class_req)
22114 22112 {
22115 22113 union scsi_cdb cdb;
22116 22114 struct uscsi_cmd ucmd_buf;
22117 22115 int status;
22118 22116 struct sd_lun *un;
22119 22117
22120 22118 ASSERT(ssc != NULL);
22121 22119 un = ssc->ssc_un;
22122 22120 ASSERT(un != NULL);
22123 22121 ASSERT(!mutex_owned(SD_MUTEX(un)));
22124 22122 ASSERT(bufaddr != NULL);
22125 22123
22126 22124 SD_TRACE(SD_LOG_IO, un,
22127 22125 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un);
22128 22126
22129 22127 bzero(&cdb, sizeof (cdb));
22130 22128 bzero(&ucmd_buf, sizeof (ucmd_buf));
22131 22129 bzero(bufaddr, buflen);
22132 22130
22133 22131 cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION;
22134 22132 cdb.cdb_opaque[1] = 1; /* polled */
22135 22133 cdb.cdb_opaque[4] = class_req;
22136 22134 FORMG1COUNT(&cdb, buflen);
22137 22135
22138 22136 ucmd_buf.uscsi_cdb = (char *)&cdb;
22139 22137 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
22140 22138 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
22141 22139 ucmd_buf.uscsi_buflen = buflen;
22142 22140 ucmd_buf.uscsi_rqbuf = NULL;
22143 22141 ucmd_buf.uscsi_rqlen = 0;
22144 22142 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
22145 22143 ucmd_buf.uscsi_timeout = 60;
22146 22144
22147 22145 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22148 22146 UIO_SYSSPACE, SD_PATH_DIRECT);
22149 22147
22150 22148 /*
22151 22149 * Only handle status == 0, the upper-level caller
22152 22150 * will put different assessment based on the context.
22153 22151 */
22154 22152 if (status == 0) {
22155 22153 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22156 22154
22157 22155 if (ucmd_buf.uscsi_resid != 0) {
22158 22156 status = EIO;
22159 22157 }
22160 22158 }
22161 22159
22162 22160 SD_TRACE(SD_LOG_IO, un,
22163 22161 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");
22164 22162
22165 22163 return (status);
22166 22164 }
22167 22165
22168 22166
22169 22167 static boolean_t
22170 22168 sd_gesn_media_data_valid(uchar_t *data)
22171 22169 {
22172 22170 uint16_t len;
22173 22171
22174 22172 len = (data[1] << 8) | data[0];
22175 22173 return ((len >= 6) &&
22176 22174 ((data[2] & SD_GESN_HEADER_NEA) == 0) &&
22177 22175 ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) &&
22178 22176 ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0));
22179 22177 }
22180 22178
22181 22179
22182 22180 /*
22183 22181 * Function: sdioctl
22184 22182 *
22185 22183 * Description: Driver's ioctl(9e) entry point function.
22186 22184 *
22187 22185 * Arguments: dev - device number
22188 22186 * cmd - ioctl operation to be performed
22189 22187 * arg - user argument, contains data to be set or reference
22190 22188 * parameter for get
22191 22189 * flag - bit flag, indicating open settings, 32/64 bit type
22192 22190 * cred_p - user credential pointer
22193 22191 * rval_p - calling process return value (OPT)
22194 22192 *
22195 22193 * Return Code: EINVAL
22196 22194 * ENOTTY
22197 22195 * ENXIO
22198 22196 * EIO
22199 22197 * EFAULT
22200 22198 * ENOTSUP
22201 22199 * EPERM
22202 22200 *
22203 22201 * Context: Called from the device switch at normal priority.
22204 22202 */
22205 22203
22206 22204 static int
22207 22205 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
22208 22206 {
22209 22207 struct sd_lun *un = NULL;
22210 22208 int err = 0;
22211 22209 int i = 0;
22212 22210 cred_t *cr;
22213 22211 int tmprval = EINVAL;
22214 22212 boolean_t is_valid;
22215 22213 sd_ssc_t *ssc;
22216 22214
22217 22215 /*
22218 22216 * All device accesses go thru sdstrategy where we check on suspend
22219 22217 * status
22220 22218 */
22221 22219 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22222 22220 return (ENXIO);
22223 22221 }
22224 22222
22225 22223 ASSERT(!mutex_owned(SD_MUTEX(un)));
22226 22224
22227 22225 /* Initialize sd_ssc_t for internal uscsi commands */
22228 22226 ssc = sd_ssc_init(un);
22229 22227
22230 22228 is_valid = SD_IS_VALID_LABEL(un);
22231 22229
22232 22230 /*
22233 22231 * Moved this wait from sd_uscsi_strategy to here for
22234 22232 * reasons of deadlock prevention. Internal driver commands,
22235 22233 * specifically those to change a devices power level, result
22236 22234 * in a call to sd_uscsi_strategy.
22237 22235 */
22238 22236 mutex_enter(SD_MUTEX(un));
22239 22237 while ((un->un_state == SD_STATE_SUSPENDED) ||
22240 22238 (un->un_state == SD_STATE_PM_CHANGING)) {
22241 22239 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
22242 22240 }
22243 22241 /*
22244 22242 * Twiddling the counter here protects commands from now
22245 22243 * through to the top of sd_uscsi_strategy. Without the
22246 22244 * counter inc. a power down, for example, could get in
22247 22245 * after the above check for state is made and before
22248 22246 * execution gets to the top of sd_uscsi_strategy.
22249 22247 * That would cause problems.
22250 22248 */
22251 22249 un->un_ncmds_in_driver++;
22252 22250
22253 22251 if (!is_valid &&
22254 22252 (flag & (FNDELAY | FNONBLOCK))) {
22255 22253 switch (cmd) {
22256 22254 case DKIOCGGEOM: /* SD_PATH_DIRECT */
22257 22255 case DKIOCGVTOC:
22258 22256 case DKIOCGEXTVTOC:
22259 22257 case DKIOCGAPART:
22260 22258 case DKIOCPARTINFO:
22261 22259 case DKIOCEXTPARTINFO:
22262 22260 case DKIOCSGEOM:
22263 22261 case DKIOCSAPART:
22264 22262 case DKIOCGETEFI:
22265 22263 case DKIOCPARTITION:
22266 22264 case DKIOCSVTOC:
22267 22265 case DKIOCSEXTVTOC:
22268 22266 case DKIOCSETEFI:
22269 22267 case DKIOCGMBOOT:
22270 22268 case DKIOCSMBOOT:
22271 22269 case DKIOCG_PHYGEOM:
22272 22270 case DKIOCG_VIRTGEOM:
22273 22271 #if defined(__i386) || defined(__amd64)
22274 22272 case DKIOCSETEXTPART:
22275 22273 #endif
22276 22274 /* let cmlb handle it */
22277 22275 goto skip_ready_valid;
22278 22276
22279 22277 case CDROMPAUSE:
22280 22278 case CDROMRESUME:
22281 22279 case CDROMPLAYMSF:
22282 22280 case CDROMPLAYTRKIND:
22283 22281 case CDROMREADTOCHDR:
22284 22282 case CDROMREADTOCENTRY:
22285 22283 case CDROMSTOP:
22286 22284 case CDROMSTART:
22287 22285 case CDROMVOLCTRL:
22288 22286 case CDROMSUBCHNL:
22289 22287 case CDROMREADMODE2:
22290 22288 case CDROMREADMODE1:
22291 22289 case CDROMREADOFFSET:
22292 22290 case CDROMSBLKMODE:
22293 22291 case CDROMGBLKMODE:
22294 22292 case CDROMGDRVSPEED:
22295 22293 case CDROMSDRVSPEED:
22296 22294 case CDROMCDDA:
22297 22295 case CDROMCDXA:
22298 22296 case CDROMSUBCODE:
22299 22297 if (!ISCD(un)) {
22300 22298 un->un_ncmds_in_driver--;
22301 22299 ASSERT(un->un_ncmds_in_driver >= 0);
22302 22300 mutex_exit(SD_MUTEX(un));
22303 22301 err = ENOTTY;
22304 22302 goto done_without_assess;
22305 22303 }
22306 22304 break;
22307 22305 case FDEJECT:
22308 22306 case DKIOCEJECT:
22309 22307 case CDROMEJECT:
22310 22308 if (!un->un_f_eject_media_supported) {
22311 22309 un->un_ncmds_in_driver--;
22312 22310 ASSERT(un->un_ncmds_in_driver >= 0);
22313 22311 mutex_exit(SD_MUTEX(un));
22314 22312 err = ENOTTY;
22315 22313 goto done_without_assess;
22316 22314 }
22317 22315 break;
22318 22316 case DKIOCFLUSHWRITECACHE:
22319 22317 mutex_exit(SD_MUTEX(un));
22320 22318 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22321 22319 if (err != 0) {
22322 22320 mutex_enter(SD_MUTEX(un));
22323 22321 un->un_ncmds_in_driver--;
22324 22322 ASSERT(un->un_ncmds_in_driver >= 0);
22325 22323 mutex_exit(SD_MUTEX(un));
22326 22324 err = EIO;
22327 22325 goto done_quick_assess;
22328 22326 }
22329 22327 mutex_enter(SD_MUTEX(un));
22330 22328 /* FALLTHROUGH */
22331 22329 case DKIOCREMOVABLE:
22332 22330 case DKIOCHOTPLUGGABLE:
22333 22331 case DKIOCINFO:
22334 22332 case DKIOCGMEDIAINFO:
22335 22333 case DKIOCGMEDIAINFOEXT:
22336 22334 case DKIOCSOLIDSTATE:
22337 22335 case MHIOCENFAILFAST:
22338 22336 case MHIOCSTATUS:
22339 22337 case MHIOCTKOWN:
22340 22338 case MHIOCRELEASE:
22341 22339 case MHIOCGRP_INKEYS:
22342 22340 case MHIOCGRP_INRESV:
22343 22341 case MHIOCGRP_REGISTER:
22344 22342 case MHIOCGRP_CLEAR:
22345 22343 case MHIOCGRP_RESERVE:
22346 22344 case MHIOCGRP_PREEMPTANDABORT:
22347 22345 case MHIOCGRP_REGISTERANDIGNOREKEY:
22348 22346 case CDROMCLOSETRAY:
22349 22347 case USCSICMD:
22350 22348 goto skip_ready_valid;
22351 22349 default:
22352 22350 break;
22353 22351 }
22354 22352
22355 22353 mutex_exit(SD_MUTEX(un));
22356 22354 err = sd_ready_and_valid(ssc, SDPART(dev));
22357 22355 mutex_enter(SD_MUTEX(un));
22358 22356
22359 22357 if (err != SD_READY_VALID) {
22360 22358 switch (cmd) {
22361 22359 case DKIOCSTATE:
22362 22360 case CDROMGDRVSPEED:
22363 22361 case CDROMSDRVSPEED:
22364 22362 case FDEJECT: /* for eject command */
22365 22363 case DKIOCEJECT:
22366 22364 case CDROMEJECT:
22367 22365 case DKIOCREMOVABLE:
22368 22366 case DKIOCHOTPLUGGABLE:
22369 22367 break;
22370 22368 default:
22371 22369 if (un->un_f_has_removable_media) {
22372 22370 err = ENXIO;
22373 22371 } else {
22374 22372 /* Do not map SD_RESERVED_BY_OTHERS to EIO */
22375 22373 if (err == SD_RESERVED_BY_OTHERS) {
22376 22374 err = EACCES;
22377 22375 } else {
22378 22376 err = EIO;
22379 22377 }
22380 22378 }
22381 22379 un->un_ncmds_in_driver--;
22382 22380 ASSERT(un->un_ncmds_in_driver >= 0);
22383 22381 mutex_exit(SD_MUTEX(un));
22384 22382
22385 22383 goto done_without_assess;
22386 22384 }
22387 22385 }
22388 22386 }
22389 22387
22390 22388 skip_ready_valid:
22391 22389 mutex_exit(SD_MUTEX(un));
22392 22390
22393 22391 switch (cmd) {
22394 22392 case DKIOCINFO:
22395 22393 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
22396 22394 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
22397 22395 break;
22398 22396
22399 22397 case DKIOCGMEDIAINFO:
22400 22398 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
22401 22399 err = sd_get_media_info(dev, (caddr_t)arg, flag);
22402 22400 break;
22403 22401
22404 22402 case DKIOCGMEDIAINFOEXT:
22405 22403 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n");
22406 22404 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag);
22407 22405 break;
22408 22406
22409 22407 case DKIOCGGEOM:
22410 22408 case DKIOCGVTOC:
22411 22409 case DKIOCGEXTVTOC:
22412 22410 case DKIOCGAPART:
22413 22411 case DKIOCPARTINFO:
22414 22412 case DKIOCEXTPARTINFO:
22415 22413 case DKIOCSGEOM:
22416 22414 case DKIOCSAPART:
22417 22415 case DKIOCGETEFI:
22418 22416 case DKIOCPARTITION:
22419 22417 case DKIOCSVTOC:
22420 22418 case DKIOCSEXTVTOC:
22421 22419 case DKIOCSETEFI:
22422 22420 case DKIOCGMBOOT:
22423 22421 case DKIOCSMBOOT:
22424 22422 case DKIOCG_PHYGEOM:
22425 22423 case DKIOCG_VIRTGEOM:
22426 22424 #if defined(__i386) || defined(__amd64)
22427 22425 case DKIOCSETEXTPART:
22428 22426 #endif
22429 22427 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);
22430 22428
22431 22429 /* TUR should spin up */
22432 22430
22433 22431 if (un->un_f_has_removable_media)
22434 22432 err = sd_send_scsi_TEST_UNIT_READY(ssc,
22435 22433 SD_CHECK_FOR_MEDIA);
22436 22434
22437 22435 else
22438 22436 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22439 22437
22440 22438 if (err != 0)
22441 22439 goto done_with_assess;
22442 22440
22443 22441 err = cmlb_ioctl(un->un_cmlbhandle, dev,
22444 22442 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);
22445 22443
22446 22444 if ((err == 0) &&
22447 22445 ((cmd == DKIOCSETEFI) ||
22448 22446 (un->un_f_pkstats_enabled) &&
22449 22447 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC ||
22450 22448 cmd == DKIOCSEXTVTOC))) {
22451 22449
22452 22450 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
22453 22451 (void *)SD_PATH_DIRECT);
22454 22452 if ((tmprval == 0) && un->un_f_pkstats_enabled) {
22455 22453 sd_set_pstats(un);
22456 22454 SD_TRACE(SD_LOG_IO_PARTITION, un,
22457 22455 "sd_ioctl: un:0x%p pstats created and "
22458 22456 "set\n", un);
22459 22457 }
22460 22458 }
22461 22459
22462 22460 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) ||
22463 22461 ((cmd == DKIOCSETEFI) && (tmprval == 0))) {
22464 22462
22465 22463 mutex_enter(SD_MUTEX(un));
22466 22464 if (un->un_f_devid_supported &&
22467 22465 (un->un_f_opt_fab_devid == TRUE)) {
22468 22466 if (un->un_devid == NULL) {
22469 22467 sd_register_devid(ssc, SD_DEVINFO(un),
22470 22468 SD_TARGET_IS_UNRESERVED);
22471 22469 } else {
22472 22470 /*
22473 22471 * The device id for this disk
22474 22472 * has been fabricated. The
22475 22473 * device id must be preserved
22476 22474 * by writing it back out to
22477 22475 * disk.
22478 22476 */
22479 22477 if (sd_write_deviceid(ssc) != 0) {
22480 22478 ddi_devid_free(un->un_devid);
22481 22479 un->un_devid = NULL;
22482 22480 }
22483 22481 }
22484 22482 }
22485 22483 mutex_exit(SD_MUTEX(un));
22486 22484 }
22487 22485
22488 22486 break;
22489 22487
22490 22488 case DKIOCLOCK:
22491 22489 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
22492 22490 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
22493 22491 SD_PATH_STANDARD);
22494 22492 goto done_with_assess;
22495 22493
22496 22494 case DKIOCUNLOCK:
22497 22495 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
22498 22496 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
22499 22497 SD_PATH_STANDARD);
22500 22498 goto done_with_assess;
22501 22499
22502 22500 case DKIOCSTATE: {
22503 22501 enum dkio_state state;
22504 22502 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");
22505 22503
22506 22504 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
22507 22505 err = EFAULT;
22508 22506 } else {
22509 22507 err = sd_check_media(dev, state);
22510 22508 if (err == 0) {
22511 22509 if (ddi_copyout(&un->un_mediastate, (void *)arg,
22512 22510 sizeof (int), flag) != 0)
22513 22511 err = EFAULT;
22514 22512 }
22515 22513 }
22516 22514 break;
22517 22515 }
22518 22516
22519 22517 case DKIOCREMOVABLE:
22520 22518 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
22521 22519 i = un->un_f_has_removable_media ? 1 : 0;
22522 22520 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22523 22521 err = EFAULT;
22524 22522 } else {
22525 22523 err = 0;
22526 22524 }
22527 22525 break;
22528 22526
22529 22527 case DKIOCSOLIDSTATE:
22530 22528 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n");
22531 22529 i = un->un_f_is_solid_state ? 1 : 0;
22532 22530 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22533 22531 err = EFAULT;
22534 22532 } else {
22535 22533 err = 0;
22536 22534 }
22537 22535 break;
22538 22536
22539 22537 case DKIOCHOTPLUGGABLE:
22540 22538 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
22541 22539 i = un->un_f_is_hotpluggable ? 1 : 0;
22542 22540 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22543 22541 err = EFAULT;
22544 22542 } else {
22545 22543 err = 0;
22546 22544 }
22547 22545 break;
22548 22546
22549 22547 case DKIOCREADONLY:
22550 22548 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n");
22551 22549 i = 0;
22552 22550 if ((ISCD(un) && !un->un_f_mmc_writable_media) ||
22553 22551 (sr_check_wp(dev) != 0)) {
22554 22552 i = 1;
22555 22553 }
22556 22554 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22557 22555 err = EFAULT;
22558 22556 } else {
22559 22557 err = 0;
22560 22558 }
22561 22559 break;
22562 22560
22563 22561 case DKIOCGTEMPERATURE:
22564 22562 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
22565 22563 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
22566 22564 break;
22567 22565
22568 22566 case MHIOCENFAILFAST:
22569 22567 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
22570 22568 if ((err = drv_priv(cred_p)) == 0) {
22571 22569 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
22572 22570 }
22573 22571 break;
22574 22572
22575 22573 case MHIOCTKOWN:
22576 22574 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
22577 22575 if ((err = drv_priv(cred_p)) == 0) {
22578 22576 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
22579 22577 }
22580 22578 break;
22581 22579
22582 22580 case MHIOCRELEASE:
22583 22581 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
22584 22582 if ((err = drv_priv(cred_p)) == 0) {
22585 22583 err = sd_mhdioc_release(dev);
22586 22584 }
22587 22585 break;
22588 22586
22589 22587 case MHIOCSTATUS:
22590 22588 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
22591 22589 if ((err = drv_priv(cred_p)) == 0) {
22592 22590 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
22593 22591 case 0:
22594 22592 err = 0;
22595 22593 break;
22596 22594 case EACCES:
22597 22595 *rval_p = 1;
22598 22596 err = 0;
22599 22597 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22600 22598 break;
22601 22599 default:
22602 22600 err = EIO;
22603 22601 goto done_with_assess;
22604 22602 }
22605 22603 }
22606 22604 break;
22607 22605
22608 22606 case MHIOCQRESERVE:
22609 22607 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
22610 22608 if ((err = drv_priv(cred_p)) == 0) {
22611 22609 err = sd_reserve_release(dev, SD_RESERVE);
22612 22610 }
22613 22611 break;
22614 22612
22615 22613 case MHIOCREREGISTERDEVID:
22616 22614 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
22617 22615 if (drv_priv(cred_p) == EPERM) {
22618 22616 err = EPERM;
22619 22617 } else if (!un->un_f_devid_supported) {
22620 22618 err = ENOTTY;
22621 22619 } else {
22622 22620 err = sd_mhdioc_register_devid(dev);
22623 22621 }
22624 22622 break;
22625 22623
22626 22624 case MHIOCGRP_INKEYS:
22627 22625 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
22628 22626 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22629 22627 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22630 22628 err = ENOTSUP;
22631 22629 } else {
22632 22630 err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
22633 22631 flag);
22634 22632 }
22635 22633 }
22636 22634 break;
22637 22635
22638 22636 case MHIOCGRP_INRESV:
22639 22637 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
22640 22638 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22641 22639 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22642 22640 err = ENOTSUP;
22643 22641 } else {
22644 22642 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
22645 22643 }
22646 22644 }
22647 22645 break;
22648 22646
22649 22647 case MHIOCGRP_REGISTER:
22650 22648 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
22651 22649 if ((err = drv_priv(cred_p)) != EPERM) {
22652 22650 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22653 22651 err = ENOTSUP;
22654 22652 } else if (arg != NULL) {
22655 22653 mhioc_register_t reg;
22656 22654 if (ddi_copyin((void *)arg, ®,
22657 22655 sizeof (mhioc_register_t), flag) != 0) {
22658 22656 err = EFAULT;
22659 22657 } else {
22660 22658 err =
22661 22659 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22662 22660 ssc, SD_SCSI3_REGISTER,
22663 22661 (uchar_t *)®);
22664 22662 if (err != 0)
22665 22663 goto done_with_assess;
22666 22664 }
22667 22665 }
22668 22666 }
22669 22667 break;
22670 22668
22671 22669 case MHIOCGRP_CLEAR:
22672 22670 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n");
22673 22671 if ((err = drv_priv(cred_p)) != EPERM) {
22674 22672 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22675 22673 err = ENOTSUP;
22676 22674 } else if (arg != NULL) {
22677 22675 mhioc_register_t reg;
22678 22676 if (ddi_copyin((void *)arg, ®,
22679 22677 sizeof (mhioc_register_t), flag) != 0) {
22680 22678 err = EFAULT;
22681 22679 } else {
22682 22680 err =
22683 22681 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22684 22682 ssc, SD_SCSI3_CLEAR,
22685 22683 (uchar_t *)®);
22686 22684 if (err != 0)
22687 22685 goto done_with_assess;
22688 22686 }
22689 22687 }
22690 22688 }
22691 22689 break;
22692 22690
22693 22691 case MHIOCGRP_RESERVE:
22694 22692 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
22695 22693 if ((err = drv_priv(cred_p)) != EPERM) {
22696 22694 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22697 22695 err = ENOTSUP;
22698 22696 } else if (arg != NULL) {
22699 22697 mhioc_resv_desc_t resv_desc;
22700 22698 if (ddi_copyin((void *)arg, &resv_desc,
22701 22699 sizeof (mhioc_resv_desc_t), flag) != 0) {
22702 22700 err = EFAULT;
22703 22701 } else {
22704 22702 err =
22705 22703 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22706 22704 ssc, SD_SCSI3_RESERVE,
22707 22705 (uchar_t *)&resv_desc);
22708 22706 if (err != 0)
22709 22707 goto done_with_assess;
22710 22708 }
22711 22709 }
22712 22710 }
22713 22711 break;
22714 22712
22715 22713 case MHIOCGRP_PREEMPTANDABORT:
22716 22714 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
22717 22715 if ((err = drv_priv(cred_p)) != EPERM) {
22718 22716 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22719 22717 err = ENOTSUP;
22720 22718 } else if (arg != NULL) {
22721 22719 mhioc_preemptandabort_t preempt_abort;
22722 22720 if (ddi_copyin((void *)arg, &preempt_abort,
22723 22721 sizeof (mhioc_preemptandabort_t),
22724 22722 flag) != 0) {
22725 22723 err = EFAULT;
22726 22724 } else {
22727 22725 err =
22728 22726 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22729 22727 ssc, SD_SCSI3_PREEMPTANDABORT,
22730 22728 (uchar_t *)&preempt_abort);
22731 22729 if (err != 0)
22732 22730 goto done_with_assess;
22733 22731 }
22734 22732 }
22735 22733 }
22736 22734 break;
22737 22735
22738 22736 case MHIOCGRP_REGISTERANDIGNOREKEY:
22739 22737 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
22740 22738 if ((err = drv_priv(cred_p)) != EPERM) {
22741 22739 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22742 22740 err = ENOTSUP;
22743 22741 } else if (arg != NULL) {
22744 22742 mhioc_registerandignorekey_t r_and_i;
22745 22743 if (ddi_copyin((void *)arg, (void *)&r_and_i,
22746 22744 sizeof (mhioc_registerandignorekey_t),
22747 22745 flag) != 0) {
22748 22746 err = EFAULT;
22749 22747 } else {
22750 22748 err =
22751 22749 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22752 22750 ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
22753 22751 (uchar_t *)&r_and_i);
22754 22752 if (err != 0)
22755 22753 goto done_with_assess;
22756 22754 }
22757 22755 }
22758 22756 }
22759 22757 break;
22760 22758
22761 22759 case USCSICMD:
22762 22760 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
22763 22761 cr = ddi_get_cred();
22764 22762 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
22765 22763 err = EPERM;
22766 22764 } else {
22767 22765 enum uio_seg uioseg;
22768 22766
22769 22767 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
22770 22768 UIO_USERSPACE;
22771 22769 if (un->un_f_format_in_progress == TRUE) {
22772 22770 err = EAGAIN;
22773 22771 break;
22774 22772 }
22775 22773
22776 22774 err = sd_ssc_send(ssc,
22777 22775 (struct uscsi_cmd *)arg,
22778 22776 flag, uioseg, SD_PATH_STANDARD);
22779 22777 if (err != 0)
22780 22778 goto done_with_assess;
22781 22779 else
22782 22780 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22783 22781 }
22784 22782 break;
22785 22783
22786 22784 case CDROMPAUSE:
22787 22785 case CDROMRESUME:
22788 22786 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
22789 22787 if (!ISCD(un)) {
22790 22788 err = ENOTTY;
22791 22789 } else {
22792 22790 err = sr_pause_resume(dev, cmd);
22793 22791 }
22794 22792 break;
22795 22793
22796 22794 case CDROMPLAYMSF:
22797 22795 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
22798 22796 if (!ISCD(un)) {
22799 22797 err = ENOTTY;
22800 22798 } else {
22801 22799 err = sr_play_msf(dev, (caddr_t)arg, flag);
22802 22800 }
22803 22801 break;
22804 22802
22805 22803 case CDROMPLAYTRKIND:
22806 22804 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
22807 22805 #if defined(__i386) || defined(__amd64)
22808 22806 /*
22809 22807 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
22810 22808 */
22811 22809 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22812 22810 #else
22813 22811 if (!ISCD(un)) {
22814 22812 #endif
22815 22813 err = ENOTTY;
22816 22814 } else {
22817 22815 err = sr_play_trkind(dev, (caddr_t)arg, flag);
22818 22816 }
22819 22817 break;
22820 22818
22821 22819 case CDROMREADTOCHDR:
22822 22820 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
22823 22821 if (!ISCD(un)) {
22824 22822 err = ENOTTY;
22825 22823 } else {
22826 22824 err = sr_read_tochdr(dev, (caddr_t)arg, flag);
22827 22825 }
22828 22826 break;
22829 22827
22830 22828 case CDROMREADTOCENTRY:
22831 22829 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
22832 22830 if (!ISCD(un)) {
22833 22831 err = ENOTTY;
22834 22832 } else {
22835 22833 err = sr_read_tocentry(dev, (caddr_t)arg, flag);
22836 22834 }
22837 22835 break;
22838 22836
22839 22837 case CDROMSTOP:
22840 22838 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
22841 22839 if (!ISCD(un)) {
22842 22840 err = ENOTTY;
22843 22841 } else {
22844 22842 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22845 22843 SD_TARGET_STOP, SD_PATH_STANDARD);
22846 22844 goto done_with_assess;
22847 22845 }
22848 22846 break;
22849 22847
22850 22848 case CDROMSTART:
22851 22849 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
22852 22850 if (!ISCD(un)) {
22853 22851 err = ENOTTY;
22854 22852 } else {
22855 22853 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22856 22854 SD_TARGET_START, SD_PATH_STANDARD);
22857 22855 goto done_with_assess;
22858 22856 }
22859 22857 break;
22860 22858
22861 22859 case CDROMCLOSETRAY:
22862 22860 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
22863 22861 if (!ISCD(un)) {
22864 22862 err = ENOTTY;
22865 22863 } else {
22866 22864 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22867 22865 SD_TARGET_CLOSE, SD_PATH_STANDARD);
22868 22866 goto done_with_assess;
22869 22867 }
22870 22868 break;
22871 22869
22872 22870 case FDEJECT: /* for eject command */
22873 22871 case DKIOCEJECT:
22874 22872 case CDROMEJECT:
22875 22873 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
22876 22874 if (!un->un_f_eject_media_supported) {
22877 22875 err = ENOTTY;
22878 22876 } else {
22879 22877 err = sr_eject(dev);
22880 22878 }
22881 22879 break;
22882 22880
22883 22881 case CDROMVOLCTRL:
22884 22882 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
22885 22883 if (!ISCD(un)) {
22886 22884 err = ENOTTY;
22887 22885 } else {
22888 22886 err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
22889 22887 }
22890 22888 break;
22891 22889
22892 22890 case CDROMSUBCHNL:
22893 22891 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
22894 22892 if (!ISCD(un)) {
22895 22893 err = ENOTTY;
22896 22894 } else {
22897 22895 err = sr_read_subchannel(dev, (caddr_t)arg, flag);
22898 22896 }
22899 22897 break;
22900 22898
22901 22899 case CDROMREADMODE2:
22902 22900 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
22903 22901 if (!ISCD(un)) {
22904 22902 err = ENOTTY;
22905 22903 } else if (un->un_f_cfg_is_atapi == TRUE) {
22906 22904 /*
22907 22905 * If the drive supports READ CD, use that instead of
22908 22906 * switching the LBA size via a MODE SELECT
22909 22907 * Block Descriptor
22910 22908 */
22911 22909 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
22912 22910 } else {
22913 22911 err = sr_read_mode2(dev, (caddr_t)arg, flag);
22914 22912 }
22915 22913 break;
22916 22914
22917 22915 case CDROMREADMODE1:
22918 22916 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
22919 22917 if (!ISCD(un)) {
22920 22918 err = ENOTTY;
22921 22919 } else {
22922 22920 err = sr_read_mode1(dev, (caddr_t)arg, flag);
22923 22921 }
22924 22922 break;
22925 22923
22926 22924 case CDROMREADOFFSET:
22927 22925 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
22928 22926 if (!ISCD(un)) {
22929 22927 err = ENOTTY;
22930 22928 } else {
22931 22929 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
22932 22930 flag);
22933 22931 }
22934 22932 break;
22935 22933
22936 22934 case CDROMSBLKMODE:
22937 22935 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
22938 22936 /*
22939 22937 * There is no means of changing block size in case of atapi
22940 22938 * drives, thus return ENOTTY if drive type is atapi
22941 22939 */
22942 22940 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22943 22941 err = ENOTTY;
22944 22942 } else if (un->un_f_mmc_cap == TRUE) {
22945 22943
22946 22944 /*
22947 22945 * MMC Devices do not support changing the
22948 22946 * logical block size
22949 22947 *
22950 22948 * Note: EINVAL is being returned instead of ENOTTY to
22951 22949 * maintain consistancy with the original mmc
22952 22950 * driver update.
22953 22951 */
22954 22952 err = EINVAL;
22955 22953 } else {
22956 22954 mutex_enter(SD_MUTEX(un));
22957 22955 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
22958 22956 (un->un_ncmds_in_transport > 0)) {
22959 22957 mutex_exit(SD_MUTEX(un));
22960 22958 err = EINVAL;
22961 22959 } else {
22962 22960 mutex_exit(SD_MUTEX(un));
22963 22961 err = sr_change_blkmode(dev, cmd, arg, flag);
22964 22962 }
22965 22963 }
22966 22964 break;
22967 22965
22968 22966 case CDROMGBLKMODE:
22969 22967 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
22970 22968 if (!ISCD(un)) {
22971 22969 err = ENOTTY;
22972 22970 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
22973 22971 (un->un_f_blockcount_is_valid != FALSE)) {
22974 22972 /*
22975 22973 * Drive is an ATAPI drive so return target block
22976 22974 * size for ATAPI drives since we cannot change the
22977 22975 * blocksize on ATAPI drives. Used primarily to detect
22978 22976 * if an ATAPI cdrom is present.
22979 22977 */
22980 22978 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
22981 22979 sizeof (int), flag) != 0) {
22982 22980 err = EFAULT;
22983 22981 } else {
22984 22982 err = 0;
22985 22983 }
22986 22984
22987 22985 } else {
22988 22986 /*
22989 22987 * Drive supports changing block sizes via a Mode
22990 22988 * Select.
22991 22989 */
22992 22990 err = sr_change_blkmode(dev, cmd, arg, flag);
22993 22991 }
22994 22992 break;
22995 22993
22996 22994 case CDROMGDRVSPEED:
22997 22995 case CDROMSDRVSPEED:
22998 22996 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
22999 22997 if (!ISCD(un)) {
23000 22998 err = ENOTTY;
23001 22999 } else if (un->un_f_mmc_cap == TRUE) {
23002 23000 /*
23003 23001 * Note: In the future the driver implementation
23004 23002 * for getting and
23005 23003 * setting cd speed should entail:
23006 23004 * 1) If non-mmc try the Toshiba mode page
23007 23005 * (sr_change_speed)
23008 23006 * 2) If mmc but no support for Real Time Streaming try
23009 23007 * the SET CD SPEED (0xBB) command
23010 23008 * (sr_atapi_change_speed)
23011 23009 * 3) If mmc and support for Real Time Streaming
23012 23010 * try the GET PERFORMANCE and SET STREAMING
23013 23011 * commands (not yet implemented, 4380808)
23014 23012 */
23015 23013 /*
23016 23014 * As per recent MMC spec, CD-ROM speed is variable
23017 23015 * and changes with LBA. Since there is no such
23018 23016 * things as drive speed now, fail this ioctl.
23019 23017 *
23020 23018 * Note: EINVAL is returned for consistancy of original
23021 23019 * implementation which included support for getting
23022 23020 * the drive speed of mmc devices but not setting
23023 23021 * the drive speed. Thus EINVAL would be returned
23024 23022 * if a set request was made for an mmc device.
23025 23023 * We no longer support get or set speed for
23026 23024 * mmc but need to remain consistent with regard
23027 23025 * to the error code returned.
23028 23026 */
23029 23027 err = EINVAL;
23030 23028 } else if (un->un_f_cfg_is_atapi == TRUE) {
23031 23029 err = sr_atapi_change_speed(dev, cmd, arg, flag);
23032 23030 } else {
23033 23031 err = sr_change_speed(dev, cmd, arg, flag);
23034 23032 }
23035 23033 break;
23036 23034
23037 23035 case CDROMCDDA:
23038 23036 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
23039 23037 if (!ISCD(un)) {
23040 23038 err = ENOTTY;
23041 23039 } else {
23042 23040 err = sr_read_cdda(dev, (void *)arg, flag);
23043 23041 }
23044 23042 break;
23045 23043
23046 23044 case CDROMCDXA:
23047 23045 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
23048 23046 if (!ISCD(un)) {
23049 23047 err = ENOTTY;
23050 23048 } else {
23051 23049 err = sr_read_cdxa(dev, (caddr_t)arg, flag);
23052 23050 }
23053 23051 break;
23054 23052
23055 23053 case CDROMSUBCODE:
23056 23054 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
23057 23055 if (!ISCD(un)) {
23058 23056 err = ENOTTY;
23059 23057 } else {
23060 23058 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
23061 23059 }
23062 23060 break;
23063 23061
23064 23062
23065 23063 #ifdef SDDEBUG
23066 23064 /* RESET/ABORTS testing ioctls */
23067 23065 case DKIOCRESET: {
23068 23066 int reset_level;
23069 23067
23070 23068 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
23071 23069 err = EFAULT;
23072 23070 } else {
23073 23071 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
23074 23072 "reset_level = 0x%lx\n", reset_level);
23075 23073 if (scsi_reset(SD_ADDRESS(un), reset_level)) {
23076 23074 err = 0;
23077 23075 } else {
23078 23076 err = EIO;
23079 23077 }
23080 23078 }
23081 23079 break;
23082 23080 }
23083 23081
23084 23082 case DKIOCABORT:
23085 23083 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
23086 23084 if (scsi_abort(SD_ADDRESS(un), NULL)) {
23087 23085 err = 0;
23088 23086 } else {
23089 23087 err = EIO;
23090 23088 }
23091 23089 break;
23092 23090 #endif
23093 23091
23094 23092 #ifdef SD_FAULT_INJECTION
23095 23093 /* SDIOC FaultInjection testing ioctls */
23096 23094 case SDIOCSTART:
23097 23095 case SDIOCSTOP:
23098 23096 case SDIOCINSERTPKT:
23099 23097 case SDIOCINSERTXB:
23100 23098 case SDIOCINSERTUN:
23101 23099 case SDIOCINSERTARQ:
23102 23100 case SDIOCPUSH:
23103 23101 case SDIOCRETRIEVE:
23104 23102 case SDIOCRUN:
23105 23103 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:"
23106 23104 "SDIOC detected cmd:0x%X:\n", cmd);
23107 23105 /* call error generator */
23108 23106 sd_faultinjection_ioctl(cmd, arg, un);
23109 23107 err = 0;
23110 23108 break;
23111 23109
23112 23110 #endif /* SD_FAULT_INJECTION */
23113 23111
23114 23112 case DKIOCFLUSHWRITECACHE:
23115 23113 {
23116 23114 struct dk_callback *dkc = (struct dk_callback *)arg;
23117 23115
23118 23116 mutex_enter(SD_MUTEX(un));
23119 23117 if (!un->un_f_sync_cache_supported ||
23120 23118 !un->un_f_write_cache_enabled) {
23121 23119 err = un->un_f_sync_cache_supported ?
23122 23120 0 : ENOTSUP;
23123 23121 mutex_exit(SD_MUTEX(un));
23124 23122 if ((flag & FKIOCTL) && dkc != NULL &&
23125 23123 dkc->dkc_callback != NULL) {
23126 23124 (*dkc->dkc_callback)(dkc->dkc_cookie,
23127 23125 err);
23128 23126 /*
23129 23127 * Did callback and reported error.
23130 23128 * Since we did a callback, ioctl
23131 23129 * should return 0.
23132 23130 */
23133 23131 err = 0;
23134 23132 }
23135 23133 break;
23136 23134 }
23137 23135 mutex_exit(SD_MUTEX(un));
23138 23136
23139 23137 if ((flag & FKIOCTL) && dkc != NULL &&
23140 23138 dkc->dkc_callback != NULL) {
23141 23139 /* async SYNC CACHE request */
23142 23140 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
23143 23141 } else {
23144 23142 /* synchronous SYNC CACHE request */
23145 23143 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
23146 23144 }
23147 23145 }
23148 23146 break;
23149 23147
23150 23148 case DKIOCGETWCE: {
23151 23149
23152 23150 int wce;
23153 23151
23154 23152 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
23155 23153 break;
23156 23154 }
23157 23155
23158 23156 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
23159 23157 err = EFAULT;
23160 23158 }
23161 23159 break;
23162 23160 }
23163 23161
23164 23162 case DKIOCSETWCE: {
23165 23163
23166 23164 int wce, sync_supported;
23167 23165 int cur_wce = 0;
23168 23166
23169 23167 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
23170 23168 err = EFAULT;
23171 23169 break;
23172 23170 }
23173 23171
23174 23172 /*
23175 23173 * Synchronize multiple threads trying to enable
23176 23174 * or disable the cache via the un_f_wcc_cv
23177 23175 * condition variable.
23178 23176 */
23179 23177 mutex_enter(SD_MUTEX(un));
23180 23178
23181 23179 /*
23182 23180 * Don't allow the cache to be enabled if the
23183 23181 * config file has it disabled.
23184 23182 */
23185 23183 if (un->un_f_opt_disable_cache && wce) {
23186 23184 mutex_exit(SD_MUTEX(un));
23187 23185 err = EINVAL;
23188 23186 break;
23189 23187 }
23190 23188
23191 23189 /*
23192 23190 * Wait for write cache change in progress
23193 23191 * bit to be clear before proceeding.
23194 23192 */
23195 23193 while (un->un_f_wcc_inprog)
23196 23194 cv_wait(&un->un_wcc_cv, SD_MUTEX(un));
23197 23195
23198 23196 un->un_f_wcc_inprog = 1;
23199 23197
23200 23198 mutex_exit(SD_MUTEX(un));
23201 23199
23202 23200 /*
23203 23201 * Get the current write cache state
23204 23202 */
23205 23203 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) {
23206 23204 mutex_enter(SD_MUTEX(un));
23207 23205 un->un_f_wcc_inprog = 0;
23208 23206 cv_broadcast(&un->un_wcc_cv);
23209 23207 mutex_exit(SD_MUTEX(un));
23210 23208 break;
23211 23209 }
23212 23210
23213 23211 mutex_enter(SD_MUTEX(un));
23214 23212 un->un_f_write_cache_enabled = (cur_wce != 0);
23215 23213
23216 23214 if (un->un_f_write_cache_enabled && wce == 0) {
23217 23215 /*
23218 23216 * Disable the write cache. Don't clear
23219 23217 * un_f_write_cache_enabled until after
23220 23218 * the mode select and flush are complete.
23221 23219 */
23222 23220 sync_supported = un->un_f_sync_cache_supported;
23223 23221
23224 23222 /*
23225 23223 * If cache flush is suppressed, we assume that the
23226 23224 * controller firmware will take care of managing the
23227 23225 * write cache for us: no need to explicitly
23228 23226 * disable it.
23229 23227 */
23230 23228 if (!un->un_f_suppress_cache_flush) {
23231 23229 mutex_exit(SD_MUTEX(un));
23232 23230 if ((err = sd_cache_control(ssc,
23233 23231 SD_CACHE_NOCHANGE,
23234 23232 SD_CACHE_DISABLE)) == 0 &&
23235 23233 sync_supported) {
23236 23234 err = sd_send_scsi_SYNCHRONIZE_CACHE(un,
23237 23235 NULL);
23238 23236 }
23239 23237 } else {
23240 23238 mutex_exit(SD_MUTEX(un));
23241 23239 }
23242 23240
23243 23241 mutex_enter(SD_MUTEX(un));
23244 23242 if (err == 0) {
23245 23243 un->un_f_write_cache_enabled = 0;
23246 23244 }
23247 23245
23248 23246 } else if (!un->un_f_write_cache_enabled && wce != 0) {
23249 23247 /*
23250 23248 * Set un_f_write_cache_enabled first, so there is
23251 23249 * no window where the cache is enabled, but the
23252 23250 * bit says it isn't.
23253 23251 */
23254 23252 un->un_f_write_cache_enabled = 1;
23255 23253
23256 23254 /*
23257 23255 * If cache flush is suppressed, we assume that the
23258 23256 * controller firmware will take care of managing the
23259 23257 * write cache for us: no need to explicitly
23260 23258 * enable it.
23261 23259 */
23262 23260 if (!un->un_f_suppress_cache_flush) {
23263 23261 mutex_exit(SD_MUTEX(un));
23264 23262 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE,
23265 23263 SD_CACHE_ENABLE);
23266 23264 } else {
23267 23265 mutex_exit(SD_MUTEX(un));
23268 23266 }
23269 23267
23270 23268 mutex_enter(SD_MUTEX(un));
23271 23269
23272 23270 if (err) {
23273 23271 un->un_f_write_cache_enabled = 0;
23274 23272 }
23275 23273 }
23276 23274
23277 23275 un->un_f_wcc_inprog = 0;
23278 23276 cv_broadcast(&un->un_wcc_cv);
23279 23277 mutex_exit(SD_MUTEX(un));
23280 23278 break;
23281 23279 }
23282 23280
23283 23281 default:
23284 23282 err = ENOTTY;
23285 23283 break;
23286 23284 }
23287 23285 mutex_enter(SD_MUTEX(un));
23288 23286 un->un_ncmds_in_driver--;
23289 23287 ASSERT(un->un_ncmds_in_driver >= 0);
23290 23288 mutex_exit(SD_MUTEX(un));
23291 23289
23292 23290
23293 23291 done_without_assess:
23294 23292 sd_ssc_fini(ssc);
23295 23293
23296 23294 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23297 23295 return (err);
23298 23296
23299 23297 done_with_assess:
23300 23298 mutex_enter(SD_MUTEX(un));
23301 23299 un->un_ncmds_in_driver--;
23302 23300 ASSERT(un->un_ncmds_in_driver >= 0);
23303 23301 mutex_exit(SD_MUTEX(un));
23304 23302
23305 23303 done_quick_assess:
23306 23304 if (err != 0)
23307 23305 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23308 23306 /* Uninitialize sd_ssc_t pointer */
23309 23307 sd_ssc_fini(ssc);
23310 23308
23311 23309 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23312 23310 return (err);
23313 23311 }
23314 23312
23315 23313
23316 23314 /*
23317 23315 * Function: sd_dkio_ctrl_info
23318 23316 *
23319 23317 * Description: This routine is the driver entry point for handling controller
23320 23318 * information ioctl requests (DKIOCINFO).
23321 23319 *
23322 23320 * Arguments: dev - the device number
23323 23321 * arg - pointer to user provided dk_cinfo structure
23324 23322 * specifying the controller type and attributes.
23325 23323 * flag - this argument is a pass through to ddi_copyxxx()
23326 23324 * directly from the mode argument of ioctl().
23327 23325 *
23328 23326 * Return Code: 0
23329 23327 * EFAULT
23330 23328 * ENXIO
23331 23329 */
23332 23330
23333 23331 static int
23334 23332 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
23335 23333 {
23336 23334 struct sd_lun *un = NULL;
23337 23335 struct dk_cinfo *info;
23338 23336 dev_info_t *pdip;
23339 23337 int lun, tgt;
23340 23338
23341 23339 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23342 23340 return (ENXIO);
23343 23341 }
23344 23342
23345 23343 info = (struct dk_cinfo *)
23346 23344 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);
23347 23345
23348 23346 switch (un->un_ctype) {
23349 23347 case CTYPE_CDROM:
23350 23348 info->dki_ctype = DKC_CDROM;
23351 23349 break;
23352 23350 default:
23353 23351 info->dki_ctype = DKC_SCSI_CCS;
23354 23352 break;
23355 23353 }
23356 23354 pdip = ddi_get_parent(SD_DEVINFO(un));
23357 23355 info->dki_cnum = ddi_get_instance(pdip);
23358 23356 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
23359 23357 (void) strcpy(info->dki_cname, ddi_get_name(pdip));
23360 23358 } else {
23361 23359 (void) strncpy(info->dki_cname, ddi_node_name(pdip),
23362 23360 DK_DEVLEN - 1);
23363 23361 }
23364 23362
23365 23363 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23366 23364 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
23367 23365 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23368 23366 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);
23369 23367
23370 23368 /* Unit Information */
23371 23369 info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
23372 23370 info->dki_slave = ((tgt << 3) | lun);
23373 23371 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
23374 23372 DK_DEVLEN - 1);
23375 23373 info->dki_flags = DKI_FMTVOL;
23376 23374 info->dki_partition = SDPART(dev);
23377 23375
23378 23376 /* Max Transfer size of this device in blocks */
23379 23377 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
23380 23378 info->dki_addr = 0;
23381 23379 info->dki_space = 0;
23382 23380 info->dki_prio = 0;
23383 23381 info->dki_vec = 0;
23384 23382
23385 23383 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
23386 23384 kmem_free(info, sizeof (struct dk_cinfo));
23387 23385 return (EFAULT);
23388 23386 } else {
23389 23387 kmem_free(info, sizeof (struct dk_cinfo));
23390 23388 return (0);
23391 23389 }
23392 23390 }
23393 23391
23394 23392 /*
23395 23393 * Function: sd_get_media_info_com
23396 23394 *
23397 23395 * Description: This routine returns the information required to populate
23398 23396 * the fields for the dk_minfo/dk_minfo_ext structures.
23399 23397 *
23400 23398 * Arguments: dev - the device number
23401 23399 * dki_media_type - media_type
23402 23400 * dki_lbsize - logical block size
23403 23401 * dki_capacity - capacity in blocks
23404 23402 * dki_pbsize - physical block size (if requested)
23405 23403 *
23406 23404 * Return Code: 0
23407 23405 * EACCESS
23408 23406 * EFAULT
23409 23407 * ENXIO
23410 23408 * EIO
23411 23409 */
static int
sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
	diskaddr_t *dki_capacity, uint_t *dki_pbsize)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uint_t			pbsize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	ssc = sd_ssc_init(un);

	/* Issue a TUR to determine if the drive is ready with media present */
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	} else if (rval != 0) {
		/*
		 * A TUR failure other than ENXIO is not fatal here: we can
		 * still report media type/capacity below.  Record the FMA
		 * assessment now so the failed command is not left
		 * unassessed.
		 */
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		*dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * We ignore all failures for CD and need to
				 * put the assessment before processing code
				 * to avoid missing assessment for FMA.
				 */
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				/*
				 * Failed for other than an illegal request
				 * or command not supported.  rqbuf[2] is the
				 * sense key and rqbuf[12] the ASC; ASC 0x20
				 * is "invalid command operation code".
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto no_assessment;
					}
				}
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data (current profile, bytes 6-7
				 * of the feature header).
				 */
				*dki_media_type = out_data[6];
				*dki_media_type <<= 8;
				*dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			/* This is a direct access device or optical disk */
			*dki_media_type = DK_FIXED_DISK;

			/*
			 * Special-case Iomega ZIP/JAZ removable drives,
			 * identified by INQUIRY vendor/product strings.
			 */
			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					*dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					*dki_media_type = DK_JAZ;
				}
			}
		} else {
			/*
			 * Not a CD, direct access or optical disk so return
			 * unknown media
			 */
			*dki_media_type = DK_UNKNOWN;
		}
	}

	/*
	 * Now read the capacity so we can provide the lbasize,
	 * pbsize and capacity.  READ CAPACITY(16) is tried first (it also
	 * yields the physical block size) when the caller asked for pbsize
	 * and the device supports the descriptor format; otherwise, or on
	 * failure, fall back to READ CAPACITY(10).
	 */
	if (dki_pbsize && un->un_f_descr_format_supported) {
		rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
		    &pbsize, SD_PATH_DIRECT);

		/*
		 * Override the physical blocksize if the instance already
		 * has a larger value.
		 */
		pbsize = MAX(pbsize, un->un_phy_blocksize);
	}

	if (dki_pbsize == NULL || rval != 0 ||
	    !un->un_f_descr_format_supported) {
		rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
		    SD_PATH_DIRECT);

		switch (rval) {
		case 0:
			if (un->un_f_enable_rmw &&
			    un->un_phy_blocksize != 0) {
				pbsize = un->un_phy_blocksize;
			} else {
				pbsize = lbasize;
			}
			media_capacity = capacity;

			/*
			 * sd_send_scsi_READ_CAPACITY() reports capacity in
			 * un->un_sys_blocksize chunks. So we need to convert
			 * it into cap.lbsize chunks.
			 */
			if (un->un_f_has_removable_media) {
				media_capacity *= un->un_sys_blocksize;
				media_capacity /= lbasize;
			}
			break;
		case EACCES:
			rval = EACCES;
			goto done;
		default:
			rval = EIO;
			goto done;
		}
	} else {
		/*
		 * READ CAPACITY(16) succeeded.  Sanitize block sizes that
		 * are not multiples of DEV_BSIZE; NOTE(review): the ISP2()
		 * checks here apply to (size % DEV_BSIZE), presumably to
		 * catch odd-sized sectors -- confirm intent upstream.
		 */
		if (un->un_f_enable_rmw &&
		    !ISP2(pbsize % DEV_BSIZE)) {
			pbsize = SSD_SECSIZE;
		} else if (!ISP2(lbasize % DEV_BSIZE) ||
		    !ISP2(pbsize % DEV_BSIZE)) {
			pbsize = lbasize = DEV_BSIZE;
		}
		media_capacity = capacity;
	}

	/*
	 * If lun is expanded dynamically, update the un structure.
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		un->un_f_expnevent = B_FALSE;
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	*dki_lbsize = lbasize;
	*dki_capacity = media_capacity;
	if (dki_pbsize)
		*dki_pbsize = pbsize;

done:
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
no_assessment:
	sd_ssc_fini(ssc);
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}
23605 23603
23606 23604 /*
23607 23605 * Function: sd_get_media_info
23608 23606 *
23609 23607 * Description: This routine is the driver entry point for handling ioctl
23610 23608 * requests for the media type or command set profile used by the
23611 23609 * drive to operate on the media (DKIOCGMEDIAINFO).
23612 23610 *
23613 23611 * Arguments: dev - the device number
23614 23612 * arg - pointer to user provided dk_minfo structure
23615 23613 * specifying the media type, logical block size and
23616 23614 * drive capacity.
23617 23615 * flag - this argument is a pass through to ddi_copyxxx()
23618 23616 * directly from the mode argument of ioctl().
23619 23617 *
23620 23618 * Return Code: returns the value from sd_get_media_info_com
23621 23619 */
23622 23620 static int
23623 23621 sd_get_media_info(dev_t dev, caddr_t arg, int flag)
23624 23622 {
23625 23623 struct dk_minfo mi;
23626 23624 int rval;
23627 23625
23628 23626 rval = sd_get_media_info_com(dev, &mi.dki_media_type,
23629 23627 &mi.dki_lbsize, &mi.dki_capacity, NULL);
23630 23628
23631 23629 if (rval)
23632 23630 return (rval);
23633 23631 if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
23634 23632 rval = EFAULT;
23635 23633 return (rval);
23636 23634 }
23637 23635
23638 23636 /*
23639 23637 * Function: sd_get_media_info_ext
23640 23638 *
23641 23639 * Description: This routine is the driver entry point for handling ioctl
23642 23640 * requests for the media type or command set profile used by the
23643 23641 * drive to operate on the media (DKIOCGMEDIAINFOEXT). The
23644 23642 * difference this ioctl and DKIOCGMEDIAINFO is the return value
23645 23643 * of this ioctl contains both logical block size and physical
23646 23644 * block size.
23647 23645 *
23648 23646 *
23649 23647 * Arguments: dev - the device number
23650 23648 * arg - pointer to user provided dk_minfo_ext structure
23651 23649 * specifying the media type, logical block size,
23652 23650 * physical block size and disk capacity.
23653 23651 * flag - this argument is a pass through to ddi_copyxxx()
23654 23652 * directly from the mode argument of ioctl().
23655 23653 *
23656 23654 * Return Code: returns the value from sd_get_media_info_com
23657 23655 */
23658 23656 static int
23659 23657 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
23660 23658 {
23661 23659 struct dk_minfo_ext mie;
23662 23660 int rval = 0;
23663 23661
23664 23662 rval = sd_get_media_info_com(dev, &mie.dki_media_type,
23665 23663 &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);
23666 23664
23667 23665 if (rval)
23668 23666 return (rval);
23669 23667 if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag))
23670 23668 rval = EFAULT;
23671 23669 return (rval);
23672 23670
23673 23671 }
23674 23672
23675 23673 /*
23676 23674 * Function: sd_watch_request_submit
23677 23675 *
23678 23676 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
23679 23677 * depending on which is supported by device.
23680 23678 */
23681 23679 static opaque_t
23682 23680 sd_watch_request_submit(struct sd_lun *un)
23683 23681 {
23684 23682 dev_t dev;
23685 23683
23686 23684 /* All submissions are unified to use same device number */
23687 23685 dev = sd_make_device(SD_DEVINFO(un));
23688 23686
23689 23687 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23690 23688 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un),
23691 23689 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23692 23690 (caddr_t)dev));
23693 23691 } else {
23694 23692 return (scsi_watch_request_submit(SD_SCSI_DEVP(un),
23695 23693 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23696 23694 (caddr_t)dev));
23697 23695 }
23698 23696 }
23699 23697
23700 23698
23701 23699 /*
23702 23700 * Function: sd_check_media
23703 23701 *
23704 23702 * Description: This utility routine implements the functionality for the
23705 23703 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the
23706 23704 * driver state changes from that specified by the user
23707 23705 * (inserted or ejected). For example, if the user specifies
23708 23706 * DKIO_EJECTED and the current media state is inserted this
23709 23707 * routine will immediately return DKIO_INSERTED. However, if the
23710 23708 * current media state is not inserted the user thread will be
23711 23709 * blocked until the drive state changes. If DKIO_NONE is specified
23712 23710 * the user thread will block until a drive state change occurs.
23713 23711 *
23714 23712 * Arguments: dev - the device number
23715 23713 * state - user pointer to a dkio_state, updated with the current
23716 23714 * drive state at return.
23717 23715 *
23718 23716 * Return Code: ENXIO
23719 23717 * EIO
23720 23718 * EAGAIN
23721 23719 * EINTR
23722 23720 */
23723 23721
static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	ssc = sd_ssc_init(un);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/* is there anything to do? */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch request,
		 * since the scsi watch sends a TUR directly to the device
		 * which the device cannot handle if it is powered down.
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			/* failed to power up; bail with rval == 0 */
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		/* Submitted without SD_MUTEX held; token may be NULL. */
		token = sd_watch_request_submit(un);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes. Routine sdpower
		 * knows about and handles this so don't count it
		 * as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
		 * If the count isn't decremented the device can't
		 * be powered down.
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);

		/*
		 * if a prior request had been made, this will be the same
		 * token, as scsi_watch was designed that way.
		 */
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		/*
		 * now wait for media change
		 * we will not be signalled unless mediastate == state but it is
		 * still better to test for this condition, since there is a
		 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
		 */
		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			/*
			 * cv_wait_sig() returns 0 when interrupted by a
			 * signal; restore the command count and return
			 * EINTR in that case.
			 */
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/*
		 * Inc the counter to indicate the device once again
		 * has an active outstanding cmd.
		 */
		un->un_ncmds_in_driver++;
	}

	/* invalidate geometry */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	/*
	 * Media has just been inserted: re-read capacity, revalidate the
	 * label and lock the door.
	 */
	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * Since the following routines use SD_PATH_DIRECT, we must
		 * call PM directly before the upcoming disk accesses. This
		 * may cause the disk to be power/spin up.
		 */

		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(ssc,
			    &capacity, &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				if (rval == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		/*
		 * Check if the media in the device is writable or not
		 */
		if (ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}

		mutex_exit(SD_MUTEX(un));
		cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
		if ((cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_check_media: un:0x%p pstats created and "
			    "set\n", un);
		}

		rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);

		sd_pm_exit(un);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		mutex_enter(SD_MUTEX(un));
	}
done:
	/* Reached with SD_MUTEX held on every path. */
	sd_ssc_fini(ssc);
	un->un_f_watcht_stopped = FALSE;
	if (token != NULL && un->un_swr_token != NULL) {
		/*
		 * Use of this local token and the mutex ensures that we avoid
		 * some race conditions associated with terminating the
		 * scsi watch.
		 */
		token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		(void) scsi_watch_request_terminate(token,
		    SCSI_WATCH_TERMINATE_WAIT);
		/*
		 * Only clear un_swr_token once the watch service has
		 * dropped its last reference to the request.
		 */
		if (scsi_watch_get_ref_count(token) == 0) {
			mutex_enter(SD_MUTEX(un));
			un->un_swr_token = (opaque_t)NULL;
		} else {
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Update the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_f_blockcount_is_valid == TRUE)
	 */
	if (un->un_errstats) {
		struct sd_errstats *stp = NULL;

		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
	return (rval);
}
23936 23934
23937 23935
23938 23936 /*
23939 23937 * Function: sd_delayed_cv_broadcast
23940 23938 *
23941 23939 * Description: Delayed cv_broadcast to allow for target to recover from media
23942 23940 * insertion.
23943 23941 *
23944 23942 * Arguments: arg - driver soft state (unit) structure
23945 23943 */
23946 23944
23947 23945 static void
23948 23946 sd_delayed_cv_broadcast(void *arg)
23949 23947 {
23950 23948 struct sd_lun *un = arg;
23951 23949 
23952 23950 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");
23953 23951 
23954 23952 mutex_enter(SD_MUTEX(un));
/* Timeout has fired: clear the id so a new delayed broadcast can be armed. */
23955 23953 un->un_dcvb_timeid = NULL;
/* Wake any thread blocked on un_state_cv (e.g. a DKIOCSTATE waiter). */
23956 23954 cv_broadcast(&un->un_state_cv);
23957 23955 mutex_exit(SD_MUTEX(un));
23958 23956 }
23959 23957
23960 23958
23961 23959 /*
23962 23960 * Function: sd_media_watch_cb
23963 23961 *
23964 23962 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
23965 23963 * routine processes the TUR sense data and updates the driver
23966 23964 * state if a transition has occurred. The user thread
23967 23965 * (sd_check_media) is then signalled.
23968 23966 *
23969 23967 * Arguments: arg - the device 'dev_t' is used for context to discriminate
23970 23968 * among multiple watches that share this callback function
23971 23969 * resultp - scsi watch facility result packet containing scsi
23972 23970 * packet, status byte and sense data
23973 23971 *
23974 23972 * Return Code: 0 for success, -1 for failure
23975 23973 */
23976 23974
23977 23975 static int
23978 23976 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23979 23977 {
23980 23978 struct sd_lun *un;
23981 23979 struct scsi_status *statusp = resultp->statusp;
23982 23980 uint8_t *sensep = (uint8_t *)resultp->sensep;
23983 23981 enum dkio_state state = DKIO_NONE;
23984 23982 dev_t dev = (dev_t)arg;
23985 23983 uchar_t actual_sense_length;
23986 23984 uint8_t skey, asc, ascq;
23987 23985 
23988 23986 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23989 23987 return (-1);
23990 23988 }
23991 23989 actual_sense_length = resultp->actual_sense_length;
23992 23990 
23993 23991 mutex_enter(SD_MUTEX(un));
23994 23992 SD_TRACE(SD_LOG_COMMON, un,
23995 23993 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23996 23994 *((char *)statusp), (void *)sensep, actual_sense_length);
23997 23995 
/*
 * The transport reports the device is gone: record DKIO_DEV_GONE and
 * wake any waiters immediately - no sense data to interpret.
 */
23998 23996 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
23999 23997 un->un_mediastate = DKIO_DEV_GONE;
24000 23998 cv_broadcast(&un->un_state_cv);
24001 23999 mutex_exit(SD_MUTEX(un));
24002 24000 
24003 24001 return (0);
24004 24002 }
24005 24003 
/*
 * MMC device being polled with GET EVENT STATUS NOTIFICATION: derive
 * the media state from the GESN media event data instead of TUR sense.
 */
24006 24004 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
24007 24005 if (sd_gesn_media_data_valid(resultp->mmc_data)) {
24008 24006 if ((resultp->mmc_data[5] &
24009 24007 SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
24010 24008 state = DKIO_INSERTED;
24011 24009 } else {
24012 24010 state = DKIO_EJECTED;
24013 24011 }
/* Eject request event: log it (best effort, hence KM_NOSLEEP). */
24014 24012 if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
24015 24013 SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
24016 24014 sd_log_eject_request_event(un, KM_NOSLEEP);
24017 24015 }
24018 24016 }
24019 24017 } else if (sensep != NULL) {
24020 24018 /*
24021 24019 * If there was a check condition then sensep points to valid
24022 24020 * sense data. If status was not a check condition but a
24023 24021 * reservation or busy status then the new state is DKIO_NONE.
24024 24022 */
24025 24023 skey = scsi_sense_key(sensep);
24026 24024 asc = scsi_sense_asc(sensep);
24027 24025 ascq = scsi_sense_ascq(sensep);
24028 24026 
24029 24027 SD_INFO(SD_LOG_COMMON, un,
24030 24028 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
24031 24029 skey, asc, ascq);
24032 24030 /* This routine only uses up to 13 bytes of sense data. */
24033 24031 if (actual_sense_length >= 13) {
24034 24032 if (skey == KEY_UNIT_ATTENTION) {
/* ASC 0x28: "not ready to ready" - medium may have changed. */
24035 24033 if (asc == 0x28) {
24036 24034 state = DKIO_INSERTED;
24037 24035 }
24038 24036 } else if (skey == KEY_NOT_READY) {
24039 24037 /*
24040 24038 * Sense data of 02/06/00 means that the
24041 24039 * drive could not read the media (No
24042 24040 * reference position found). In this case
24043 24041 * to prevent a hang on the DKIOCSTATE IOCTL
24044 24042 * we set the media state to DKIO_INSERTED.
24045 24043 */
24046 24044 if (asc == 0x06 && ascq == 0x00)
24047 24045 state = DKIO_INSERTED;
24048 24046 
24049 24047 /*
24050 24048 * if 02/04/02 means that the host
24051 24049 * should send start command. Explicitly
24052 24050 * leave the media state as is
24053 24051 * (inserted) as the media is inserted
24054 24052 * and host has stopped device for PM
24055 24053 * reasons. Upon next true read/write
24056 24054 * to this media will bring the
24057 24055 * device to the right state good for
24058 24056 * media access.
24059 24057 */
/* ASC 0x3A: medium not present. */
24060 24058 if (asc == 0x3a) {
24061 24059 state = DKIO_EJECTED;
24062 24060 } else {
24063 24061 /*
24064 24062 * If the drive is busy with an
24065 24063 * operation or long write, keep the
24066 24064 * media in an inserted state.
24067 24065 */
24068 24066 
24069 24067 if ((asc == 0x04) &&
24070 24068 ((ascq == 0x02) ||
24071 24069 (ascq == 0x07) ||
24072 24070 (ascq == 0x08))) {
24073 24071 state = DKIO_INSERTED;
24074 24072 }
24075 24073 }
24076 24074 } else if (skey == KEY_NO_SENSE) {
24077 24075 if ((asc == 0x00) && (ascq == 0x00)) {
24078 24076 /*
24079 24077 * Sense Data 00/00/00 does not provide
24080 24078 * any information about the state of
24081 24079 * the media. Ignore it.
24082 24080 */
24083 24081 mutex_exit(SD_MUTEX(un));
24084 24082 return (0);
24085 24083 }
24086 24084 }
24087 24085 }
/* Clean completion with no sense data: media is present. */
24088 24086 } else if ((*((char *)statusp) == STATUS_GOOD) &&
24089 24087 (resultp->pkt->pkt_reason == CMD_CMPLT)) {
24090 24088 state = DKIO_INSERTED;
24091 24089 }
24092 24090 
24093 24091 SD_TRACE(SD_LOG_COMMON, un,
24094 24092 "sd_media_watch_cb: state=%x, specified=%x\n",
24095 24093 state, un->un_specified_mediastate);
24096 24094 
24097 24095 /*
24098 24096 * now signal the waiting thread if this is *not* the specified state;
24099 24097 * delay the signal if the state is DKIO_INSERTED to allow the target
24100 24098 * to recover
24101 24099 */
24102 24100 if (state != un->un_specified_mediastate) {
24103 24101 un->un_mediastate = state;
24104 24102 if (state == DKIO_INSERTED) {
24105 24103 /*
24106 24104 * delay the signal to give the drive a chance
24107 24105 * to do what it apparently needs to do
24108 24106 */
24109 24107 SD_TRACE(SD_LOG_COMMON, un,
24110 24108 "sd_media_watch_cb: delayed cv_broadcast\n");
/* At most one delayed broadcast may be outstanding at a time. */
24111 24109 if (un->un_dcvb_timeid == NULL) {
24112 24110 un->un_dcvb_timeid =
24113 24111 timeout(sd_delayed_cv_broadcast, un,
24114 24112 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
24115 24113 }
24116 24114 } else {
24117 24115 SD_TRACE(SD_LOG_COMMON, un,
24118 24116 "sd_media_watch_cb: immediate cv_broadcast\n");
24119 24117 cv_broadcast(&un->un_state_cv);
24120 24118 }
24121 24119 }
24122 24120 mutex_exit(SD_MUTEX(un));
24123 24121 return (0);
24124 24122 }
24125 24123
24126 24124
24127 24125 /*
24128 24126 * Function: sd_dkio_get_temp
24129 24127 *
24130 24128 * Description: This routine is the driver entry point for handling ioctl
24131 24129 * requests to get the disk temperature.
24132 24130 *
24133 24131 * Arguments: dev - the device number
24134 24132 * arg - pointer to user provided dk_temperature structure.
24135 24133 * flag - this argument is a pass through to ddi_copyxxx()
24136 24134 * directly from the mode argument of ioctl().
24137 24135 *
24138 24136 * Return Code: 0
24139 24137 * EFAULT
24140 24138 * ENXIO
24141 24139 * EAGAIN
24142 24140 */
24143 24141
24144 24142 static int
24145 24143 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
24146 24144 {
24147 24145 struct sd_lun *un = NULL;
24148 24146 struct dk_temperature *dktemp = NULL;
24149 24147 uchar_t *temperature_page;
24150 24148 int rval = 0;
24151 24149 int path_flag = SD_PATH_STANDARD;
24152 24150 sd_ssc_t *ssc;
24153 24151 
24154 24152 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24155 24153 return (ENXIO);
24156 24154 }
24157 24155 
24158 24156 ssc = sd_ssc_init(un);
24159 24157 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
24160 24158 
24161 24159 /* copyin the disk temp argument to get the user flags */
24162 24160 if (ddi_copyin((void *)arg, dktemp,
24163 24161 sizeof (struct dk_temperature), flag) != 0) {
24164 24162 rval = EFAULT;
24165 24163 goto done;
24166 24164 }
24167 24165 
24168 24166 /* Initialize the temperature to invalid. */
24169 24167 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24170 24168 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24171 24169 
24172 24170 /*
24173 24171 * Note: Investigate removing the "bypass pm" semantic.
24174 24172 * Can we just bypass PM always?
24175 24173 */
24176 24174 if (dktemp->dkt_flags & DKT_BYPASS_PM) {
24177 24175 path_flag = SD_PATH_DIRECT;
24178 24176 ASSERT(!mutex_owned(&un->un_pm_mutex));
24179 24177 mutex_enter(&un->un_pm_mutex);
24180 24178 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
24181 24179 /*
24182 24180 * If DKT_BYPASS_PM is set, and the drive happens to be
24183 24181 * in low power mode, we can not wake it up, Need to
24184 24182 * return EAGAIN.
24185 24183 */
24186 24184 mutex_exit(&un->un_pm_mutex);
24187 24185 rval = EAGAIN;
24188 24186 goto done;
24189 24187 } else {
24190 24188 /*
24191 24189 * Indicate to PM the device is busy. This is required
24192 24190 * to avoid a race - i.e. the ioctl is issuing a
24193 24191 * command and the pm framework brings down the device
24194 24192 * to low power mode (possible power cut-off on some
24195 24193 * platforms).
24196 24194 */
24197 24195 mutex_exit(&un->un_pm_mutex);
24198 24196 if (sd_pm_entry(un) != DDI_SUCCESS) {
24199 24197 rval = EAGAIN;
24200 24198 goto done;
24201 24199 }
24202 24200 }
24203 24201 }
24204 24202 
24205 24203 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
24206 24204 
24207 24205 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
24208 24206 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
24209 24207 if (rval != 0)
24210 24208 goto done2;
24211 24209 
24212 24210 /*
24213 24211 * For the current temperature verify that the parameter length is 0x02
24214 24212 * and the parameter code is 0x00
24215 24213 */
24216 24214 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
24217 24215 (temperature_page[5] == 0x00)) {
/* A value byte of 0xFF means the drive has no valid reading. */
24218 24216 if (temperature_page[9] == 0xFF) {
24219 24217 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24220 24218 } else {
24221 24219 dktemp->dkt_cur_temp = (short)(temperature_page[9]);
24222 24220 }
24223 24221 }
24224 24222 
24225 24223 /*
24226 24224 * For the reference temperature verify that the parameter
24227 24225 * length is 0x02 and the parameter code is 0x01
24228 24226 */
24229 24227 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
24230 24228 (temperature_page[11] == 0x01)) {
24231 24229 if (temperature_page[15] == 0xFF) {
24232 24230 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24233 24231 } else {
24234 24232 dktemp->dkt_ref_temp = (short)(temperature_page[15]);
24235 24233 }
24236 24234 }
24237 24235 
24238 24236 /* Do the copyout regardless of the temperature commands status. */
24239 24237 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
24240 24238 flag) != 0) {
24241 24239 rval = EFAULT;
24242 24240 goto done1;
24243 24241 }
24244 24242 
/*
 * Cleanup ladder: done2 feeds the LOG SENSE failure to the FMA
 * assessment; done1 drops the sd_pm_entry() hold (taken only on the
 * DKT_BYPASS_PM path, which is also the only path setting path_flag to
 * SD_PATH_DIRECT) and frees the log page; done releases the ssc and
 * the copyin buffer.
 */
24245 24243 done2:
24246 24244 if (rval != 0) {
24247 24245 if (rval == EIO)
24248 24246 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24249 24247 else
24250 24248 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24251 24249 }
24252 24250 done1:
24253 24251 if (path_flag == SD_PATH_DIRECT) {
24254 24252 sd_pm_exit(un);
24255 24253 }
24256 24254 
24257 24255 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
24258 24256 done:
24259 24257 sd_ssc_fini(ssc);
24260 24258 if (dktemp != NULL) {
24261 24259 kmem_free(dktemp, sizeof (struct dk_temperature));
24262 24260 }
24263 24261 
24264 24262 return (rval);
24265 24263 }
24266 24264
24267 24265
24268 24266 /*
24269 24267 * Function: sd_log_page_supported
24270 24268 *
24271 24269 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
24272 24270 * supported log pages.
24273 24271 *
24274 24272 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
24275 24273 * structure for this target.
24276 24274 * log_page -
24277 24275 *
24278 24276 * Return Code: -1 - on error (log sense is optional and may not be supported).
24279 24277 * 0 - log page not found.
24280 24278 * 1 - log page found.
24281 24279 */
24282 24280
24283 24281 static int
24284 24282 sd_log_page_supported(sd_ssc_t *ssc, int log_page)
24285 24283 {
24286 24284 uchar_t *log_page_data;
24287 24285 int i;
24288 24286 int match = 0;
24289 24287 int log_size;
24290 24288 int status = 0;
24291 24289 struct sd_lun *un;
24292 24290 
24293 24291 ASSERT(ssc != NULL);
24294 24292 un = ssc->ssc_un;
24295 24293 ASSERT(un != NULL);
24296 24294 
24297 24295 log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
24298 24296 
/* Request log page 0, which lists the pages the device supports. */
24299 24297 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
24300 24298 SD_PATH_DIRECT);
24301 24299 
24302 24300 if (status != 0) {
24303 24301 if (status == EIO) {
24304 24302 /*
24305 24303 * Some disks do not support log sense, we
24306 24304 * should ignore this kind of error(sense key is
24307 24305 * 0x5 - illegal request).
24308 24306 */
24309 24307 uint8_t *sensep;
24310 24308 int senlen;
24311 24309 
24312 24310 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
24313 24311 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
24314 24312 ssc->ssc_uscsi_cmd->uscsi_rqresid);
24315 24313 
24316 24314 if (senlen > 0 &&
24317 24315 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
24318 24316 sd_ssc_assessment(ssc,
24319 24317 SD_FMT_IGNORE_COMPROMISE);
24320 24318 } else {
24321 24319 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24322 24320 }
24323 24321 } else {
24324 24322 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24325 24323 }
24326 24324 
24327 24325 SD_ERROR(SD_LOG_COMMON, un,
24328 24326 "sd_log_page_supported: failed log page retrieval\n");
24329 24327 kmem_free(log_page_data, 0xFF);
24330 24328 return (-1);
24331 24329 }
24332 24330 
/* Byte 3 of the returned page holds the supported-page list length. */
24333 24331 log_size = log_page_data[3];
24334 24332 
24335 24333 /*
24336 24334 * The list of supported log pages start from the fourth byte. Check
24337 24335 * until we run out of log pages or a match is found.
24338 24336 */
24339 24337 for (i = 4; (i < (log_size + 4)) && !match; i++) {
24340 24338 if (log_page_data[i] == log_page) {
24341 24339 match++;
24342 24340 }
24343 24341 }
24344 24342 kmem_free(log_page_data, 0xFF);
24345 24343 return (match);
24346 24344 }
24347 24345
24348 24346
24349 24347 /*
24350 24348 * Function: sd_mhdioc_failfast
24351 24349 *
24352 24350 * Description: This routine is the driver entry point for handling ioctl
24353 24351 * requests to enable/disable the multihost failfast option.
24354 24352 * (MHIOCENFAILFAST)
24355 24353 *
24356 24354 * Arguments: dev - the device number
24357 24355 * arg - user specified probing interval.
24358 24356 * flag - this argument is a pass through to ddi_copyxxx()
24359 24357 * directly from the mode argument of ioctl().
24360 24358 *
24361 24359 * Return Code: 0
24362 24360 * EFAULT
24363 24361 * ENXIO
24364 24362 */
24365 24363
24366 24364 static int
24367 24365 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
24368 24366 {
24369 24367 struct sd_lun *un = NULL;
24370 24368 int mh_time;
24371 24369 int rval = 0;
24372 24370 
24373 24371 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24374 24372 return (ENXIO);
24375 24373 }
24376 24374 
24377 24375 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
24378 24376 return (EFAULT);
24379 24377 
/* Non-zero interval enables failfast; zero tears it down. */
24380 24378 if (mh_time) {
24381 24379 mutex_enter(SD_MUTEX(un));
24382 24380 un->un_resvd_status |= SD_FAILFAST;
24383 24381 mutex_exit(SD_MUTEX(un));
24384 24382 /*
24385 24383 * If mh_time is INT_MAX, then this ioctl is being used for
24386 24384 * SCSI-3 PGR purposes, and we don't need to spawn watch thread.
24387 24385 */
24388 24386 if (mh_time != INT_MAX) {
24389 24387 rval = sd_check_mhd(dev, mh_time);
24390 24388 }
24391 24389 } else {
/* Disable: stop the MHD watch first, then clear the failfast flag. */
24392 24390 (void) sd_check_mhd(dev, 0);
24393 24391 mutex_enter(SD_MUTEX(un));
24394 24392 un->un_resvd_status &= ~SD_FAILFAST;
24395 24393 mutex_exit(SD_MUTEX(un));
24396 24394 }
24397 24395 return (rval);
24398 24396 }
24399 24397
24400 24398
24401 24399 /*
24402 24400 * Function: sd_mhdioc_takeown
24403 24401 *
24404 24402 * Description: This routine is the driver entry point for handling ioctl
24405 24403 * requests to forcefully acquire exclusive access rights to the
24406 24404 * multihost disk (MHIOCTKOWN).
24407 24405 *
24408 24406 * Arguments: dev - the device number
24409 24407 * arg - user provided structure specifying the delay
24410 24408 * parameters in milliseconds
24411 24409 * flag - this argument is a pass through to ddi_copyxxx()
24412 24410 * directly from the mode argument of ioctl().
24413 24411 *
24414 24412 * Return Code: 0
24415 24413 * EFAULT
24416 24414 * ENXIO
24417 24415 */
24418 24416
24419 24417 static int
24420 24418 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
24421 24419 {
24422 24420 struct sd_lun *un = NULL;
24423 24421 struct mhioctkown *tkown = NULL;
24424 24422 int rval = 0;
24425 24423 
24426 24424 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24427 24425 return (ENXIO);
24428 24426 }
24429 24427 
/* arg may be NULL: take ownership with default delay parameters. */
24430 24428 if (arg != NULL) {
24431 24429 tkown = (struct mhioctkown *)
24432 24430 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
24433 24431 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
24434 24432 if (rval != 0) {
24435 24433 rval = EFAULT;
24436 24434 goto error;
24437 24435 }
24438 24436 }
24439 24437 
24440 24438 rval = sd_take_ownership(dev, tkown);
24441 24439 mutex_enter(SD_MUTEX(un));
24442 24440 if (rval == 0) {
24443 24441 un->un_resvd_status |= SD_RESERVE;
/*
 * NOTE(review): reinstate_resv_delay is documented as milliseconds
 * and is scaled by 1000 here, then divided by 1000 when passed to
 * sd_check_mhd() below - confirm intended units against
 * SD_REINSTATE_RESV_DELAY before changing this.
 */
24444 24442 if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
24445 24443 sd_reinstate_resv_delay =
24446 24444 tkown->reinstate_resv_delay * 1000;
24447 24445 } else {
24448 24446 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
24449 24447 }
24450 24448 /*
24451 24449 * Give the scsi_watch routine interval set by
24452 24450 * the MHIOCENFAILFAST ioctl precedence here.
24453 24451 */
24454 24452 if ((un->un_resvd_status & SD_FAILFAST) == 0) {
24455 24453 mutex_exit(SD_MUTEX(un));
24456 24454 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000);
24457 24455 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24458 24456 "sd_mhdioc_takeown : %d\n",
24459 24457 sd_reinstate_resv_delay);
24460 24458 } else {
24461 24459 mutex_exit(SD_MUTEX(un));
24462 24460 }
/* Ask to be notified of bus resets via sd_mhd_reset_notify_cb. */
24463 24461 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
24464 24462 sd_mhd_reset_notify_cb, (caddr_t)un);
24465 24463 } else {
/* Ownership was not obtained: make sure SD_RESERVE is not left set. */
24466 24464 un->un_resvd_status &= ~SD_RESERVE;
24467 24465 mutex_exit(SD_MUTEX(un));
24468 24466 }
24469 24467 
24470 24468 error:
24471 24469 if (tkown != NULL) {
24472 24470 kmem_free(tkown, sizeof (struct mhioctkown));
24473 24471 }
24474 24472 return (rval);
24475 24473 }
24476 24474
24477 24475
24478 24476 /*
24479 24477 * Function: sd_mhdioc_release
24480 24478 *
24481 24479 * Description: This routine is the driver entry point for handling ioctl
24482 24480 * requests to release exclusive access rights to the multihost
24483 24481 * disk (MHIOCRELEASE).
24484 24482 *
24485 24483 * Arguments: dev - the device number
24486 24484 *
24487 24485 * Return Code: 0
24488 24486 * ENXIO
24489 24487 */
24490 24488
24491 24489 static int
24492 24490 sd_mhdioc_release(dev_t dev)
24493 24491 {
24494 24492 struct sd_lun *un = NULL;
24495 24493 timeout_id_t resvd_timeid_save;
24496 24494 int resvd_status_save;
24497 24495 int rval = 0;
24498 24496 
24499 24497 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24500 24498 return (ENXIO);
24501 24499 }
24502 24500 
/* Snapshot reservation state so it can be restored if the release fails. */
24503 24501 mutex_enter(SD_MUTEX(un));
24504 24502 resvd_status_save = un->un_resvd_status;
24505 24503 un->un_resvd_status &=
24506 24504 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
/* Cancel any pending reclaim timeout; untimeout() is called unlocked. */
24507 24505 if (un->un_resvd_timeid) {
24508 24506 resvd_timeid_save = un->un_resvd_timeid;
24509 24507 un->un_resvd_timeid = NULL;
24510 24508 mutex_exit(SD_MUTEX(un));
24511 24509 (void) untimeout(resvd_timeid_save);
24512 24510 } else {
24513 24511 mutex_exit(SD_MUTEX(un));
24514 24512 }
24515 24513 
24516 24514 /*
24517 24515 * destroy any pending timeout thread that may be attempting to
24518 24516 * reinstate reservation on this device.
24519 24517 */
24520 24518 sd_rmv_resv_reclaim_req(dev);
24521 24519 
24522 24520 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
24523 24521 mutex_enter(SD_MUTEX(un));
/* Stop the MHD watch unless failfast still requires it. */
24524 24522 if ((un->un_mhd_token) &&
24525 24523 ((un->un_resvd_status & SD_FAILFAST) == 0)) {
24526 24524 mutex_exit(SD_MUTEX(un));
24527 24525 (void) sd_check_mhd(dev, 0);
24528 24526 } else {
24529 24527 mutex_exit(SD_MUTEX(un));
24530 24528 }
24531 24529 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
24532 24530 sd_mhd_reset_notify_cb, (caddr_t)un);
24533 24531 } else {
24534 24532 /*
24535 24533 * sd_mhd_watch_cb will restart the resvd recover timeout thread
24536 24534 */
24537 24535 mutex_enter(SD_MUTEX(un));
24538 24536 un->un_resvd_status = resvd_status_save;
24539 24537 mutex_exit(SD_MUTEX(un));
24540 24538 }
24541 24539 return (rval);
24542 24540 }
24543 24541
24544 24542
24545 24543 /*
24546 24544 * Function: sd_mhdioc_register_devid
24547 24545 *
24548 24546 * Description: This routine is the driver entry point for handling ioctl
24549 24547 * requests to register the device id (MHIOCREREGISTERDEVID).
24550 24548 *
24551 24549 * Note: The implementation for this ioctl has been updated to
24552 24550 * be consistent with the original PSARC case (1999/357)
24553 24551 * (4375899, 4241671, 4220005)
24554 24552 *
24555 24553 * Arguments: dev - the device number
24556 24554 *
24557 24555 * Return Code: 0
24558 24556 * ENXIO
24559 24557 */
24560 24558
24561 24559 static int
24562 24560 sd_mhdioc_register_devid(dev_t dev)
24563 24561 {
24564 24562 struct sd_lun *un = NULL;
24565 24563 int rval = 0;
24566 24564 sd_ssc_t *ssc;
24567 24565 
24568 24566 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24569 24567 return (ENXIO);
24570 24568 }
24571 24569 
24572 24570 ASSERT(!mutex_owned(SD_MUTEX(un)));
24573 24571 
24574 24572 mutex_enter(SD_MUTEX(un));
24575 24573 
24576 24574 /* If a devid already exists, de-register it */
24577 24575 if (un->un_devid != NULL) {
24578 24576 ddi_devid_unregister(SD_DEVINFO(un));
24579 24577 /*
24580 24578 * After unregister devid, needs to free devid memory
24581 24579 */
24582 24580 ddi_devid_free(un->un_devid);
24583 24581 un->un_devid = NULL;
24584 24582 }
24585 24583 
24586 24584 /* Check for reservation conflict */
24587 24585 mutex_exit(SD_MUTEX(un));
24588 24586 ssc = sd_ssc_init(un);
24589 24587 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
24590 24588 mutex_enter(SD_MUTEX(un));
24591 24589 
24592 24590 switch (rval) {
/* TUR succeeded: device is unreserved, safe to (re)register the devid. */
24593 24591 case 0:
24594 24592 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
24595 24593 break;
/* EACCES (reservation conflict): leave devid unregistered; rval passed up. */
24596 24594 case EACCES:
24597 24595 break;
24598 24596 default:
24599 24597 rval = EIO;
24600 24598 }
24601 24599 
24602 24600 mutex_exit(SD_MUTEX(un));
/* Feed the command outcome into the FMA assessment before teardown. */
24603 24601 if (rval != 0) {
24604 24602 if (rval == EIO)
24605 24603 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24606 24604 else
24607 24605 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24608 24606 }
24609 24607 sd_ssc_fini(ssc);
24610 24608 return (rval);
24611 24609 }
24612 24610
24613 24611
24614 24612 /*
24615 24613 * Function: sd_mhdioc_inkeys
24616 24614 *
24617 24615 * Description: This routine is the driver entry point for handling ioctl
24618 24616 * requests to issue the SCSI-3 Persistent In Read Keys command
24619 24617 * to the device (MHIOCGRP_INKEYS).
24620 24618 *
24621 24619 * Arguments: dev - the device number
24622 24620 * arg - user provided in_keys structure
24623 24621 * flag - this argument is a pass through to ddi_copyxxx()
24624 24622 * directly from the mode argument of ioctl().
24625 24623 *
24626 24624 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24627 24625 * ENXIO
24628 24626 * EFAULT
24629 24627 */
24630 24628
24631 24629 static int
24632 24630 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24633 24631 {
24634 24632 struct sd_lun *un;
24635 24633 mhioc_inkeys_t inkeys;
24636 24634 int rval = 0;
24637 24635 
24638 24636 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24639 24637 return (ENXIO);
24640 24638 }
24641 24639 
24642 24640 #ifdef _MULTI_DATAMODEL
/* 32-bit caller on a 64-bit kernel: convert the ILP32 layout in and out. */
24643 24641 switch (ddi_model_convert_from(flag & FMODELS)) {
24644 24642 case DDI_MODEL_ILP32: {
24645 24643 struct mhioc_inkeys32 inkeys32;
24646 24644 
24647 24645 if (ddi_copyin(arg, &inkeys32,
24648 24646 sizeof (struct mhioc_inkeys32), flag) != 0) {
24649 24647 return (EFAULT);
24650 24648 }
24651 24649 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24652 24650 if ((rval = sd_persistent_reservation_in_read_keys(un,
24653 24651 &inkeys, flag)) != 0) {
24654 24652 return (rval);
24655 24653 }
/*
 * Only generation is copied back here; the key list is presumably
 * written through the embedded li pointer by the helper - confirm.
 */
24656 24654 inkeys32.generation = inkeys.generation;
24657 24655 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24658 24656 flag) != 0) {
24659 24657 return (EFAULT);
24660 24658 }
24661 24659 break;
24662 24660 }
24663 24661 case DDI_MODEL_NONE:
24664 24662 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24665 24663 flag) != 0) {
24666 24664 return (EFAULT);
24667 24665 }
24668 24666 if ((rval = sd_persistent_reservation_in_read_keys(un,
24669 24667 &inkeys, flag)) != 0) {
24670 24668 return (rval);
24671 24669 }
24672 24670 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24673 24671 flag) != 0) {
24674 24672 return (EFAULT);
24675 24673 }
24676 24674 break;
24677 24675 }
24678 24676 
24679 24677 #else /* ! _MULTI_DATAMODEL */
24680 24678 
24681 24679 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24682 24680 return (EFAULT);
24683 24681 }
24684 24682 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24685 24683 if (rval != 0) {
24686 24684 return (rval);
24687 24685 }
24688 24686 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24689 24687 return (EFAULT);
24690 24688 }
24691 24689 
24692 24690 #endif /* _MULTI_DATAMODEL */
24693 24691 
24694 24692 return (rval);
24695 24693 }
24696 24694
24697 24695
24698 24696 /*
24699 24697 * Function: sd_mhdioc_inresv
24700 24698 *
24701 24699 * Description: This routine is the driver entry point for handling ioctl
24702 24700 * requests to issue the SCSI-3 Persistent In Read Reservations
24703 24701 * command to the device (MHIOCGRP_INRESV).
24704 24702 *
24705 24703 * Arguments: dev - the device number
24706 24704 * arg - user provided in_resv structure
24707 24705 * flag - this argument is a pass through to ddi_copyxxx()
24708 24706 * directly from the mode argument of ioctl().
24709 24707 *
24710 24708 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
24711 24709 * ENXIO
24712 24710 * EFAULT
24713 24711 */
24714 24712
24715 24713 static int
24716 24714 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
24717 24715 {
24718 24716 struct sd_lun *un;
24719 24717 mhioc_inresvs_t inresvs;
24720 24718 int rval = 0;
24721 24719 
24722 24720 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24723 24721 return (ENXIO);
24724 24722 }
24725 24723 
24726 24724 #ifdef _MULTI_DATAMODEL
24727 24725 
/* 32-bit caller on a 64-bit kernel: convert the ILP32 layout in and out. */
24728 24726 switch (ddi_model_convert_from(flag & FMODELS)) {
24729 24727 case DDI_MODEL_ILP32: {
24730 24728 struct mhioc_inresvs32 inresvs32;
24731 24729 
24732 24730 if (ddi_copyin(arg, &inresvs32,
24733 24731 sizeof (struct mhioc_inresvs32), flag) != 0) {
24734 24732 return (EFAULT);
24735 24733 }
24736 24734 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
24737 24735 if ((rval = sd_persistent_reservation_in_read_resv(un,
24738 24736 &inresvs, flag)) != 0) {
24739 24737 return (rval);
24740 24738 }
/*
 * Only generation is copied back here; the descriptor list is
 * presumably written through the embedded li pointer - confirm.
 */
24741 24739 inresvs32.generation = inresvs.generation;
24742 24740 if (ddi_copyout(&inresvs32, arg,
24743 24741 sizeof (struct mhioc_inresvs32), flag) != 0) {
24744 24742 return (EFAULT);
24745 24743 }
24746 24744 break;
24747 24745 }
24748 24746 case DDI_MODEL_NONE:
24749 24747 if (ddi_copyin(arg, &inresvs,
24750 24748 sizeof (mhioc_inresvs_t), flag) != 0) {
24751 24749 return (EFAULT);
24752 24750 }
24753 24751 if ((rval = sd_persistent_reservation_in_read_resv(un,
24754 24752 &inresvs, flag)) != 0) {
24755 24753 return (rval);
24756 24754 }
24757 24755 if (ddi_copyout(&inresvs,
24758 24756 sizeof (mhioc_inresvs_t), flag) != 0) {
24759 24757 return (EFAULT);
24760 24758 }
24761 24759 break;
24762 24760 }
24763 24761 
24764 24762 #else /* ! _MULTI_DATAMODEL */
24765 24763 
24766 24764 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
24767 24765 return (EFAULT);
24768 24766 }
24769 24767 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
24770 24768 if (rval != 0) {
24771 24769 return (rval);
24772 24770 }
24773 24771 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
24774 24772 return (EFAULT);
24775 24773 }
24776 24774 
24777 24775 #endif /* ! _MULTI_DATAMODEL */
24778 24776 
24779 24777 return (rval);
24780 24778 }
24781 24779
24782 24780
24783 24781 /*
24784 24782 * The following routines support the clustering functionality described below
24785 24783 * and implement lost reservation reclaim functionality.
24786 24784 *
24787 24785 * Clustering
24788 24786 * ----------
24789 24787 * The clustering code uses two different, independent forms of SCSI
24790 24788 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3
24791 24789 * Persistent Group Reservations. For any particular disk, it will use either
24792 24790 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
24793 24791 *
24794 24792 * SCSI-2
24795 24793 * The cluster software takes ownership of a multi-hosted disk by issuing the
24796 24794 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
24797 24795 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
24798 24796 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl
24799 24797 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the
24800 24798 * driver. The meaning of failfast is that if the driver (on this host) ever
24801 24799 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
24802 24800 * it should immediately panic the host. The motivation for this ioctl is that
24803 24801 * if this host does encounter reservation conflict, the underlying cause is
24804 24802 * that some other host of the cluster has decided that this host is no longer
24805 24803 * in the cluster and has seized control of the disks for itself. Since this
24806 24804 * host is no longer in the cluster, it ought to panic itself. The
24807 24805 * MHIOCENFAILFAST ioctl does two things:
24808 24806 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24809 24807 * error to panic the host
24810 24808 * (b) it sets up a periodic timer to test whether this host still has
24811 24809 * "access" (in that no other host has reserved the device): if the
24812 24810 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24813 24811 * purpose of that periodic timer is to handle scenarios where the host is
24814 24812 * otherwise temporarily quiescent, temporarily doing no real i/o.
24815 24813 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24816 24814 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
24817 24815 * the device itself.
24818 24816 *
24819 24817 * SCSI-3 PGR
24820 24818 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24821 24819 * facility is supported through the shared multihost disk ioctls
24822 24820 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24823 24821 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR)
24824 24822 *
24825 24823 * Reservation Reclaim:
24826 24824 * --------------------
24827 24825 * To support the lost reservation reclaim operations this driver creates a
24828 24826 * single thread to handle reinstating reservations on all devices that have
24829 24827 * lost reservations sd_resv_reclaim_requests are logged for all devices that
24830 24828 * have LOST RESERVATIONS when the scsi watch facility callsback sd_mhd_watch_cb
24831 24829 * and the reservation reclaim thread loops through the requests to regain the
24832 24830 * lost reservations.
24833 24831 */
24834 24832
24835 24833 /*
24836 24834 * Function: sd_check_mhd()
24837 24835 *
24838 24836 * Description: This function sets up and submits a scsi watch request or
24839 24837 * terminates an existing watch request. This routine is used in
24840 24838 * support of reservation reclaim.
24841 24839 *
24842 24840 * Arguments: dev - the device 'dev_t' is used for context to discriminate
24843 24841 * among multiple watches that share the callback function
24844 24842 * interval - the number of microseconds specifying the watch
24845 24843 * interval for issuing TEST UNIT READY commands. If
24846 24844 * set to 0 the watch should be terminated. If the
24847 24845 * interval is set to 0 and if the device is required
24848 24846 * to hold reservation while disabling failfast, the
24849 24847 * watch is restarted with an interval of
24850 24848 * reinstate_resv_delay.
24851 24849 *
24852 24850 * Return Code: 0 - Successful submit/terminate of scsi watch request
24853 24851 * ENXIO - Indicates an invalid device was specified
24854 24852 * EAGAIN - Unable to submit the scsi watch request
24855 24853 */
24856 24854
static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			/*
			 * SD_MUTEX is dropped across the terminate call;
			 * the watch callback (sd_mhd_watch_cb) also acquires
			 * SD_MUTEX, and TERMINATE_ALL_WAIT waits for any
			 * in-flight callback to finish.
			 */
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_ALL_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation but perhaps we should be checking
			 * the failfast case.
			 */
			return (0);
		}
		/*
		 * If the device is required to hold reservation while
		 * disabling failfast, we need to restart the scsi_watch
		 * routine with an interval of reinstate_resv_delay.
		 */
		if (un->un_resvd_status & SD_RESERVE) {
			/*
			 * sd_reinstate_resv_delay is in usecs; the local
			 * interval is carried in msecs until the conversion
			 * below.
			 */
			interval = sd_reinstate_resv_delay/1000;
		} else {
			/* no failfast so bail */
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * adjust minimum time interval to 1 second,
	 * and convert from msecs to usecs
	 */
	if (interval > 0 && interval < 1000) {
		interval = 1000;
	}
	interval *= 1000;

	/*
	 * submit the request to the scsi_watch service
	 */
	token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
	    SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
	if (token == NULL) {
		return (EAGAIN);
	}

	/*
	 * save token for termination later on
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_mhd_token = token;
	mutex_exit(SD_MUTEX(un));
	return (0);
}
24929 24927
24930 24928
24931 24929 /*
24932 24930 * Function: sd_mhd_watch_cb()
24933 24931 *
24934 24932 * Description: This function is the call back function used by the scsi watch
24935 24933 * facility. The scsi watch facility sends the "Test Unit Ready"
24936 24934 * and processes the status. If applicable (i.e. a "Unit Attention"
24937 24935 * status and automatic "Request Sense" not used) the scsi watch
24938 24936 * facility will send a "Request Sense" and retrieve the sense data
24939 24937 * to be passed to this callback function. In either case the
24940 24938 * automatic "Request Sense" or the facility submitting one, this
24941 24939 * callback is passed the status and sense data.
24942 24940 *
24943 24941 * Arguments: arg - the device 'dev_t' is used for context to discriminate
24944 24942 * among multiple watches that share this callback function
24945 24943 * resultp - scsi watch facility result packet containing scsi
24946 24944 * packet, status byte and sense data
24947 24945 *
24948 24946 * Return Code: 0 - continue the watch task
24949 24947 * non-zero - terminate the watch task
24950 24948 */
24951 24949
static int
sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun			*un;
	struct scsi_status		*statusp;
	uint8_t				*sensep;
	struct scsi_pkt			*pkt;
	uchar_t				actual_sense_length;
	dev_t				dev = (dev_t)arg;

	ASSERT(resultp != NULL);
	statusp			= resultp->statusp;
	sensep			= (uint8_t *)resultp->sensep;
	pkt			= resultp->pkt;
	actual_sense_length	= resultp->actual_sense_length;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_mhd_watch_cb: reason '%s', status '%s'\n",
	    scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));

	/* Begin processing of the status and/or sense data */
	if (pkt->pkt_reason != CMD_CMPLT) {
		/* Handle the incomplete packet */
		sd_mhd_watch_incomplete(un, pkt);
		return (0);
	} else if (*((unsigned char *)statusp) != STATUS_GOOD) {
		if (*((unsigned char *)statusp)
		    == STATUS_RESERVATION_CONFLICT) {
			/*
			 * Handle a reservation conflict by panicking if
			 * configured for failfast or by logging the conflict
			 * and updating the reservation status
			 */
			mutex_enter(SD_MUTEX(un));
			if ((un->un_resvd_status & SD_FAILFAST) &&
			    (sd_failfast_enable)) {
				sd_panic_for_res_conflict(un);
				/*NOTREACHED*/
			}
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_cb: Reservation Conflict\n");
			un->un_resvd_status |= SD_RESERVATION_CONFLICT;
			mutex_exit(SD_MUTEX(un));
		}
	}

	/*
	 * All paths below that fall through to the lost-reservation check
	 * must leave SD_MUTEX held; it is released by the single
	 * mutex_exit() at the bottom of the function.
	 */
	if (sensep != NULL) {
		if (actual_sense_length >= (SENSE_LENGTH - 2)) {
			mutex_enter(SD_MUTEX(un));
			if ((scsi_sense_asc(sensep) ==
			    SD_SCSI_RESET_SENSE_CODE) &&
			    (un->un_resvd_status & SD_RESERVE)) {
				/*
				 * The additional sense code indicates a power
				 * on or bus device reset has occurred; update
				 * the reservation status.
				 */
				un->un_resvd_status |=
				    (SD_LOST_RESERVE | SD_WANT_RESERVE);
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_mhd_watch_cb: Lost Reservation\n");
			}
		} else {
			/* sense data too short to evaluate; SD_MUTEX not held */
			return (0);
		}
	} else {
		/* no sense data; take SD_MUTEX for the checks below */
		mutex_enter(SD_MUTEX(un));
	}

	if ((un->un_resvd_status & SD_RESERVE) &&
	    (un->un_resvd_status & SD_LOST_RESERVE)) {
		if (un->un_resvd_status & SD_WANT_RESERVE) {
			/*
			 * A reset occurred in between the last probe and this
			 * one so if a timeout is pending cancel it.
			 */
			if (un->un_resvd_timeid) {
				timeout_id_t temp_id = un->un_resvd_timeid;
				un->un_resvd_timeid = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) untimeout(temp_id);
				mutex_enter(SD_MUTEX(un));
			}
			un->un_resvd_status &= ~SD_WANT_RESERVE;
		}
		if (un->un_resvd_timeid == 0) {
			/* Schedule a timeout to handle the lost reservation */
			un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
			    (void *)dev,
			    drv_usectohz(sd_reinstate_resv_delay));
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}
25051 25049
25052 25050
25053 25051 /*
25054 25052 * Function: sd_mhd_watch_incomplete()
25055 25053 *
25056 25054 * Description: This function is used to find out why a scsi pkt sent by the
25057 25055 * scsi watch facility was not completed. Under some scenarios this
25058 25056 * routine will return. Otherwise it will send a bus reset to see
25059 25057 * if the drive is still online.
25060 25058 *
25061 25059 * Arguments: un - driver soft state (unit) structure
25062 25060 * pkt - incomplete scsi pkt
25063 25061 */
25064 25062
static void
sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
{
	int	be_chatty;
	int	perr;

	ASSERT(pkt != NULL);
	ASSERT(un != NULL);
	be_chatty = (!(pkt->pkt_flags & FLAG_SILENT));
	perr = (pkt->pkt_statistics & STAT_PERR);

	mutex_enter(SD_MUTEX(un));
	if (un->un_state == SD_STATE_DUMPING) {
		/* crash dump in progress; leave the device alone */
		mutex_exit(SD_MUTEX(un));
		return;
	}

	switch (pkt->pkt_reason) {
	case CMD_UNX_BUS_FREE:
		/*
		 * If we had a parity error that caused the target to drop BSY*,
		 * don't be chatty about it.
		 */
		if (perr && be_chatty) {
			be_chatty = 0;
		}
		break;
	case CMD_TAG_REJECT:
		/*
		 * The SCSI-2 spec states that a tag reject will be sent by the
		 * target if tagged queuing is not supported. A tag reject may
		 * also be sent during certain initialization periods or to
		 * control internal resources. For the latter case the target
		 * may also return Queue Full.
		 *
		 * If this driver receives a tag reject from a target that is
		 * going through an init period or controlling internal
		 * resources tagged queuing will be disabled. This is a less
		 * than optimal behavior but the driver is unable to determine
		 * the target state and assumes tagged queueing is not supported
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		/*
		 * NOTE(review): SD_MUTEX is dropped around scsi_ifsetcap(),
		 * presumably because the HBA call may block.
		 */
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state, fallthrough and
		 * reset the target and/or bus unless selection did not complete
		 * (indicated by STATE_GOT_BUS) in which case we don't want to
		 * go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, than try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			/* SD_MUTEX dropped across the scsi_reset() cascade */
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; Update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}
25199 25197
25200 25198
25201 25199 /*
25202 25200 * Function: sd_sname()
25203 25201 *
25204 25202 * Description: This is a simple little routine to return a string containing
25205 25203 * a printable description of command status byte for use in
25206 25204 * logging.
25207 25205 *
25208 25206 * Arguments: status - pointer to a status byte
25209 25207 *
25210 25208 * Return Code: char * - string containing status description.
25211 25209 */
25212 25210
25213 25211 static char *
25214 25212 sd_sname(uchar_t status)
25215 25213 {
25216 25214 switch (status & STATUS_MASK) {
25217 25215 case STATUS_GOOD:
25218 25216 return ("good status");
25219 25217 case STATUS_CHECK:
25220 25218 return ("check condition");
25221 25219 case STATUS_MET:
25222 25220 return ("condition met");
25223 25221 case STATUS_BUSY:
25224 25222 return ("busy");
25225 25223 case STATUS_INTERMEDIATE:
25226 25224 return ("intermediate");
25227 25225 case STATUS_INTERMEDIATE_MET:
25228 25226 return ("intermediate - condition met");
25229 25227 case STATUS_RESERVATION_CONFLICT:
25230 25228 return ("reservation_conflict");
25231 25229 case STATUS_TERMINATED:
25232 25230 return ("command terminated");
25233 25231 case STATUS_QFULL:
25234 25232 return ("queue full");
25235 25233 default:
25236 25234 return ("<unknown status>");
25237 25235 }
25238 25236 }
25239 25237
25240 25238
25241 25239 /*
25242 25240 * Function: sd_mhd_resvd_recover()
25243 25241 *
25244 25242 * Description: This function adds a reservation entry to the
25245 25243 * sd_resv_reclaim_request list and signals the reservation
25246 25244 * reclaim thread that there is work pending. If the reservation
25247 25245 * reclaim thread has not been previously created this function
25248 25246 * will kick it off.
25249 25247 *
25250 25248 * Arguments: arg - the device 'dev_t' is used for context to discriminate
25251 25249 * among multiple watches that share this callback function
25252 25250 *
25253 25251 * Context: This routine is called by timeout() and is run in interrupt
25254 25252 * context. It must not sleep or call other functions which may
25255 25253 * sleep.
25256 25254 */
25257 25255
static void
sd_mhd_resvd_recover(void *arg)
{
	dev_t			dev = (dev_t)arg;
	struct sd_lun		*un;
	struct sd_thr_request	*sd_treq = NULL;
	struct sd_thr_request	*sd_cur = NULL;
	struct sd_thr_request	*sd_prev = NULL;
	int			already_there = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return;
	}

	mutex_enter(SD_MUTEX(un));
	/* this timeout has fired; clear the id so it can be rescheduled */
	un->un_resvd_timeid = NULL;
	if (un->un_resvd_status & SD_WANT_RESERVE) {
		/*
		 * There was a reset so don't issue the reserve, allow the
		 * sd_mhd_watch_cb callback function to notice this and
		 * reschedule the timeout for reservation.
		 */
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * Add this device to the sd_resv_reclaim_request list and the
	 * sd_resv_reclaim_thread should take care of the rest.
	 *
	 * Note: We can't sleep in this context so if the memory allocation
	 * fails allow the sd_mhd_watch_cb callback function to notice this and
	 * reschedule the timeout for reservation. (4378460)
	 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		/* scan for a duplicate request for this device */
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				/*
				 * already in Queue so don't log
				 * another request for the device
				 */
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			/* append the new request at the tail */
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim and free up this
	 * thread. We cannot block this thread while we go away to do the
	 * reservation reclaim
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}
25340 25338
25341 25339 /*
25342 25340 * Function: sd_resv_reclaim_thread()
25343 25341 *
25344 25342 * Description: This function implements the reservation reclaim operations
25345 25343 *
25346 25344 * Arguments: arg - the device 'dev_t' is used for context to discriminate
25347 25345 * among multiple watches that share this callback function
25348 25346 */
25349 25347
static void
sd_resv_reclaim_thread()
{
	struct sd_lun		*un;
	struct sd_thr_request	*sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	/*
	 * NOTE(review): cv_wait() is not re-checked in a loop; a wakeup
	 * with an empty queue simply makes this thread fall through,
	 * clean up, and exit below.  It is recreated on demand by
	 * sd_mhd_resvd_recover().
	 */
	if (sd_tr.srq_thr_req_head == NULL) {
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		/* drop the list lock while issuing the (blocking) reserve */
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim reservation only if SD_RESERVE is still set. There
		 * may have been a call to MHIOCRELEASE before we got here.
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation. If this is done after the
			 * call to sd_reserve_release a reservation loss in the
			 * window between pkt completion of reserve cmd and
			 * mutex_enter below may not be recognized
			 */
			un->un_resvd_status &= ~SD_LOST_RESERVE;
			mutex_exit(SD_MUTEX(un));

			if (sd_reserve_release(sd_mhreq->dev,
			    SD_RESERVE) == 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: "
				    "Reservation Recovered\n");
			} else {
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_LOST_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: Failed "
				    "Reservation Recovery\n");
			}
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
		ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
		kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
		sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
		/*
		 * wakeup the destroy thread if anyone is waiting on
		 * us to complete.
		 */
		cv_signal(&sd_tr.srq_inprocess_cv);
		SD_TRACE(SD_LOG_IOCTL_MHD, un,
		    "sd_resv_reclaim_thread: cv_signalling current request \n");
	}

	/*
	 * cleanup the sd_tr structure now that this thread will not exist
	 */
	ASSERT(sd_tr.srq_thr_req_head == NULL);
	ASSERT(sd_tr.srq_thr_cur_req == NULL);
	sd_tr.srq_resv_reclaim_thread = NULL;
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
	thread_exit();
}
25442 25440
25443 25441
25444 25442 /*
25445 25443 * Function: sd_rmv_resv_reclaim_req()
25446 25444 *
25447 25445 * Description: This function removes any pending reservation reclaim requests
25448 25446 * for the specified device.
25449 25447 *
25450 25448 * Arguments: dev - the device 'dev_t'
25451 25449 */
25452 25450
25453 25451 static void
25454 25452 sd_rmv_resv_reclaim_req(dev_t dev)
25455 25453 {
25456 25454 struct sd_thr_request *sd_mhreq;
25457 25455 struct sd_thr_request *sd_prev;
25458 25456
25459 25457 /* Remove a reservation reclaim request from the list */
25460 25458 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25461 25459 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
25462 25460 /*
25463 25461 * We are attempting to reinstate reservation for
25464 25462 * this device. We wait for sd_reserve_release()
25465 25463 * to return before we return.
25466 25464 */
25467 25465 cv_wait(&sd_tr.srq_inprocess_cv,
25468 25466 &sd_tr.srq_resv_reclaim_mutex);
25469 25467 } else {
25470 25468 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
25471 25469 if (sd_mhreq && sd_mhreq->dev == dev) {
25472 25470 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
25473 25471 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25474 25472 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25475 25473 return;
25476 25474 }
25477 25475 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
25478 25476 if (sd_mhreq && sd_mhreq->dev == dev) {
25479 25477 break;
25480 25478 }
25481 25479 sd_prev = sd_mhreq;
25482 25480 }
25483 25481 if (sd_mhreq != NULL) {
25484 25482 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
25485 25483 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25486 25484 }
25487 25485 }
25488 25486 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25489 25487 }
25490 25488
25491 25489
25492 25490 /*
25493 25491 * Function: sd_mhd_reset_notify_cb()
25494 25492 *
25495 25493 * Description: This is a call back function for scsi_reset_notify. This
25496 25494 * function updates the softstate reserved status and logs the
25497 25495 * reset. The driver scsi watch facility callback function
25498 25496 * (sd_mhd_watch_cb) and reservation reclaim thread functionality
25499 25497 * will reclaim the reservation.
25500 25498 *
25501 25499 * Arguments: arg - driver soft state (unit) structure
25502 25500 */
25503 25501
25504 25502 static void
25505 25503 sd_mhd_reset_notify_cb(caddr_t arg)
25506 25504 {
25507 25505 struct sd_lun *un = (struct sd_lun *)arg;
25508 25506
25509 25507 mutex_enter(SD_MUTEX(un));
25510 25508 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25511 25509 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25512 25510 SD_INFO(SD_LOG_IOCTL_MHD, un,
25513 25511 "sd_mhd_reset_notify_cb: Lost Reservation\n");
25514 25512 }
25515 25513 mutex_exit(SD_MUTEX(un));
25516 25514 }
25517 25515
25518 25516
25519 25517 /*
25520 25518 * Function: sd_take_ownership()
25521 25519 *
25522 25520 * Description: This routine implements an algorithm to achieve a stable
25523 25521 * reservation on disks which don't implement priority reserve,
25524 25522 * and makes sure that other host lose re-reservation attempts.
25525 25523 * This algorithm contains of a loop that keeps issuing the RESERVE
25526 25524 * for some period of time (min_ownership_delay, default 6 seconds)
25527 25525 * During that loop, it looks to see if there has been a bus device
25528 25526 * reset or bus reset (both of which cause an existing reservation
25529 25527 * to be lost). If the reservation is lost issue RESERVE until a
25530 25528 * period of min_ownership_delay with no resets has gone by, or
25531 25529 * until max_ownership_delay has expired. This loop ensures that
25532 25530 * the host really did manage to reserve the device, in spite of
25533 25531 * resets. The looping for min_ownership_delay (default six
25534 25532 * seconds) is important to early generation clustering products,
25535 25533 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25536 25534 * MHIOCENFAILFAST periodic timer of two seconds. By having
25537 25535 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25538 25536 * MHIOCENFAILFAST poll every two seconds, the idea is that by the
25539 25537 * time the MHIOCTKOWN ioctl returns, the other host (if any) will
25540 25538 * have already noticed, via the MHIOCENFAILFAST polling, that it
25541 25539 * no longer "owns" the disk and will have panicked itself. Thus,
25542 25540 * the host issuing the MHIOCTKOWN is assured (with timing
25543 25541 * dependencies) that by the time it actually starts to use the
25544 25542 * disk for real work, the old owner is no longer accessing it.
25545 25543 *
25546 25544 * min_ownership_delay is the minimum amount of time for which the
25547 25545 * disk must be reserved continuously devoid of resets before the
25548 25546 * MHIOCTKOWN ioctl will return success.
25549 25547 *
25550 25548 * max_ownership_delay indicates the amount of time by which the
25551 25549 * take ownership should succeed or timeout with an error.
25552 25550 *
25553 25551 * Arguments: dev - the device 'dev_t'
25554 25552 * *p - struct containing timing info.
25555 25553 *
25556 25554 * Return Code: 0 for success or error code
25557 25555 */
25558 25556
static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count = 0;
	int		min_ownership_delay = 6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
	 */
	if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE))
	    != SD_SUCCESS) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_take_ownership: return(1)=%d\n", rval);
		return (rval);
	}

	/* Update the softstate reserved status to indicate the reservation */
	mutex_enter(SD_MUTEX(un));
	un->un_resvd_status |= SD_RESERVE;
	un->un_resvd_status &=
	    ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT);
	mutex_exit(SD_MUTEX(un));

	/*
	 * Caller-supplied delays (in msec, from the MHIOCTKOWN ioctl arg)
	 * override the built-in defaults; 0 means "use the default".
	 */
	if (p != NULL) {
		if (p->min_ownership_delay != 0) {
			min_ownership_delay = p->min_ownership_delay * 1000;
		}
		if (p->max_ownership_delay != 0) {
			max_ownership_delay = p->max_ownership_delay * 1000;
		}
	}
	SD_INFO(SD_LOG_IOCTL_MHD, un,
	    "sd_take_ownership: min, max delays: %d, %d\n",
	    min_ownership_delay, max_ownership_delay);

	/*
	 * All timing below is in lbolt ticks (ddi_get_lbolt(9F)).  The
	 * comparisons are written as signed differences ("a - b < 0"
	 * rather than "a < b") so they stay correct across lbolt wrap.
	 */
	start_time = ddi_get_lbolt();
	current_time = start_time;
	ownership_time = current_time + drv_usectohz(min_ownership_delay);
	end_time = start_time + drv_usectohz(max_ownership_delay);

	while (current_time - end_time < 0) {
		delay(drv_usectohz(500000));

		/* Reassert the reservation; retry once before giving up. */
		if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) {
			if ((sd_reserve_release(dev, SD_RESERVE)) != 0) {
				mutex_enter(SD_MUTEX(un));
				rval = (un->un_resvd_status &
				    SD_RESERVATION_CONFLICT) ? EACCES : EIO;
				mutex_exit(SD_MUTEX(un));
				break;
			}
		}
		previous_current_time = current_time;
		current_time = ddi_get_lbolt();
		mutex_enter(SD_MUTEX(un));
		if (err || (un->un_resvd_status & SD_LOST_RESERVE)) {
			/*
			 * The reservation was lost (e.g. via a reset from
			 * another host): restart the min_ownership_delay
			 * stability window from now and reset the count of
			 * consecutive successful re-reservations.
			 */
			ownership_time = ddi_get_lbolt() +
			    drv_usectohz(min_ownership_delay);
			reservation_count = 0;
		} else {
			reservation_count++;
		}
		un->un_resvd_status |= SD_RESERVE;
		un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE);
		mutex_exit(SD_MUTEX(un));

		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_take_ownership: ticks for loop iteration=%ld, "
		    "reservation=%s\n", (current_time - previous_current_time),
		    reservation_count ? "ok" : "reclaimed");

		/*
		 * Ownership is declared stable only when BOTH the minimum
		 * ownership delay has elapsed AND at least 4 consecutive
		 * re-reservations succeeded without the reservation being
		 * lost in between.
		 */
		if (current_time - ownership_time >= 0 &&
		    reservation_count >= 4) {
			rval = 0; /* Achieved a stable ownership */
			break;
		}
		if (current_time - end_time >= 0) {
			rval = EACCES; /* No ownership in max possible time */
			break;
		}
	}
	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_take_ownership: return(2)=%d\n", rval);
	return (rval);
}
25657 25655
25658 25656
25659 25657 /*
25660 25658 * Function: sd_reserve_release()
25661 25659 *
25662 25660 * Description: This function builds and sends scsi RESERVE, RELEASE, and
25663 25661 * PRIORITY RESERVE commands based on a user specified command type
25664 25662 *
25665 25663 * Arguments: dev - the device 'dev_t'
25666 25664 * cmd - user specified command type; one of SD_PRIORITY_RESERVE,
25667 25665 * SD_RESERVE, SD_RELEASE
25668 25666 *
25669 25667 * Return Code: 0 or Error Code
25670 25668 */
25671 25669
static int
sd_reserve_release(dev_t dev, int cmd)
{
	struct uscsi_cmd	*com = NULL;
	struct sd_lun		*un = NULL;
	char			cdb[CDB_GROUP0];
	int			rval;

	ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) ||
	    (cmd == SD_PRIORITY_RESERVE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* instantiate and initialize the command and cdb */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP0);
	com->uscsi_flags = USCSI_SILENT;
	com->uscsi_timeout = un->un_reserve_release_time;
	com->uscsi_cdblen = CDB_GROUP0;
	com->uscsi_cdb = cdb;
	if (cmd == SD_RELEASE) {
		cdb[0] = SCMD_RELEASE;
	} else {
		/* SD_RESERVE and SD_PRIORITY_RESERVE both issue RESERVE */
		cdb[0] = SCMD_RESERVE;
	}

	/* Send the command. */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);

	/*
	 * "break" a reservation that is held by another host, by issuing a
	 * reset if priority reserve is desired, and we could not get the
	 * device.
	 */
	if ((cmd == SD_PRIORITY_RESERVE) &&
	    (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		/*
		 * First try to reset the LUN. If we cannot, then try a target
		 * reset, followed by a bus reset if the target reset fails.
		 * Escalation order matters: each wider reset disturbs more
		 * of the system, so it is only attempted when the narrower
		 * reset was not issued or did not succeed.
		 */
		int reset_retval = 0;
		if (un->un_f_lun_reset_enabled == TRUE) {
			reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (reset_retval == 0) {
			/* The LUN reset either failed or was not issued */
			reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
		if ((reset_retval == 0) &&
		    (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) {
			/* All three reset levels failed; give up with EIO. */
			rval = EIO;
			kmem_free(com, sizeof (*com));
			return (rval);
		}

		/* Reinitialize the uscsi command for the retry below. */
		bzero(com, sizeof (struct uscsi_cmd));
		com->uscsi_flags = USCSI_SILENT;
		com->uscsi_cdb = cdb;
		com->uscsi_cdblen = CDB_GROUP0;
		com->uscsi_timeout = 5;

		/*
		 * Reissue the last reserve command, this time without request
		 * sense. Assume that it is just a regular reserve command.
		 */
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
	}

	/* Return an error if still getting a reservation conflict. */
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}
25752 25750
25753 25751
25754 25752 #define SD_NDUMP_RETRIES 12
25755 25753 /*
25756 25754 * System Crash Dump routine
25757 25755 */
25758 25756
static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	/* rmw - blkno for target */
	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
	size_t		tgt_nblk;	/* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	/* Dumping to a CD or an unlabeled device is not supported. */
	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/*
	 * For a device whose target block size differs from DEV_BSIZE,
	 * both the start block and the length must be aligned to the
	 * target block size (no read-modify-write is done in this path).
	 */
	if (!(NOT_DEVBSIZE(un))) {
		int secmask = 0;
		int blknomask = 0;

		blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
		secmask = un->un_tgt_blocksize - 1;

		if (blkno & blknomask) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump start block not modulo %d\n",
			    un->un_tgt_blocksize);
			return (EINVAL);
		}

		if ((nblk * DEV_BSIZE) & secmask) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump length not modulo %d\n",
			    un->un_tgt_blocksize);
			return (EINVAL);
		}

	}

	/* Validate blocks to dump at against partition size. */

	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	if (NOT_DEVBSIZE(un)) {
		if ((blkno + nblk) > nblks) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump range larger than partition: "
			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
			    blkno, nblk, nblks);
			return (EINVAL);
		}
	} else {
		/* blkno/nblk are in DEV_BSIZE units; scale to tgt blocks. */
		if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
		    (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump range larger than partition: "
			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
			    blkno, nblk, nblks);
			return (EINVAL);
		}
	}

	/*
	 * If the device is powered down, spin it up in polled mode:
	 * raise the HBA's power first, then send START STOP UNIT by hand.
	 */
	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0,
		    SD_PM_STATE_ACTIVE(un));

		/*
		 * Dump no longer uses sdpower to power on a device, it's
		 * in-line here so it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
		    SD_PM_STATE_CHANGE);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	/* Disable normal throttling; the dump path owns the device. */
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * a good state so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
	 * In sddump we don't care about allow_bus_device_reset anymore
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 *	 systems (i.e. E420R) a bus device reset is
			 *	 insufficient to reset the state of the target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 *	 because this tends to hang the bus (loop) for
			 *	 too long while everyone is logging out and in
			 *	 and the deadman timer for dumping will fire
			 *	 before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	if (NOT_DEVBSIZE(un)) {
		blkno += start_block;
	} else {
		blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
		blkno += start_block;
	}

	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			/*
			 * The request is not aligned on target block
			 * boundaries: a read-modify-write is required.
			 */
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and number of block
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine which is going to do read part
			 * of read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
			 */
			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/*
			 * Offset is being calculated as -
			 * (original block # * system block size) -
			 * (new block # * target block size)
			 */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));

			ASSERT((io_start_offset >= 0) &&
			    (io_start_offset < un->un_tgt_blocksize));
			/*
			 * Do the modify portion of read modify write.
			 */
			bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
			    (size_t)nblk * un->un_sys_blocksize);
		} else {
			doing_rmw = FALSE;
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
		}

		/* Convert blkno and nblk to target blocks */
		blkno = tgt_blkno;
		nblk = tgt_nblk;
	} else {
		/*
		 * 512-byte target: write straight from the caller's
		 * buffer through a stack-allocated buf.
		 */
		wr_bp = &wr_buf;
		bzero(wr_bp, sizeof (struct buf));
		wr_bp->b_flags		= B_BUSY;
		wr_bp->b_un.b_addr	= addr;
		wr_bp->b_bcount		= nblk << DEV_BSHIFT;
		wr_bp->b_resid		= 0;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Obtain a SCSI packet for the write command.
	 * It should be safe to call the allocator here without
	 * worrying about being locked for DVMA mapping because
	 * the address we're passed is already a DVMA mapping
	 *
	 * We are also not going to worry about semaphore ownership
	 * in the dump buffer. Dumping is single threaded at present.
	 */

	wr_pktp = NULL;

	dma_resid = wr_bp->b_bcount;
	oblkno = blkno;

	if (!(NOT_DEVBSIZE(un))) {
		nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE);
	}

	/*
	 * Outer loop: resume after partial-DMA transfers until the whole
	 * buffer has been written.  Inner loop: retry packet allocation
	 * up to SD_NDUMP_RETRIES times, since resources may be scarce at
	 * dump time.
	 */
	while (dma_resid != 0) {

		for (i = 0; i < SD_NDUMP_RETRIES; i++) {
			wr_bp->b_flags &= ~B_ERROR;

			if (un->un_partial_dma_supported == 1) {
				/*
				 * Recompute the block/count window for the
				 * portion of the buffer not yet transferred.
				 */
				blkno = oblkno +
				    ((wr_bp->b_bcount - dma_resid) /
				    un->un_tgt_blocksize);
				nblk = dma_resid / un->un_tgt_blocksize;

				if (wr_pktp) {
					/*
					 * Partial DMA transfers after initial
					 * transfer
					 */
					rval = sd_setup_next_rw_pkt(un,
					    wr_pktp, wr_bp, blkno, nblk);
				} else {
					/* Initial transfer */
					rval = sd_setup_rw_pkt(un, &wr_pktp,
					    wr_bp, un->un_pkt_flags,
					    NULL_FUNC, NULL, blkno, nblk);
				}
			} else {
				rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
				    0, NULL_FUNC, NULL, blkno, nblk);
			}

			if (rval == 0) {
				/* We were given a SCSI packet, continue. */
				break;
			}

			/*
			 * Packet allocation failed: log with escalating
			 * severity (warn on first failure, note on
			 * intermediate retries, give up after the last).
			 */
			if (i == 0) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; "
					    "error code: 0x%x, retrying",
					    geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; "
					    "retrying");
				}
			} else if (i != (SD_NDUMP_RETRIES - 1)) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "error code: 0x%x, retrying\n",
					    geterror(wr_bp));
				}
			} else {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "error code: 0x%x, retries failed, "
					    "giving up.\n", geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "retries failed, giving up.\n");
				}
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				/* Free the RMW buffer if one was allocated. */
				if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
					mutex_exit(SD_MUTEX(un));
					scsi_free_consistent_buf(wr_bp);
				} else {
					mutex_exit(SD_MUTEX(un));
				}
				return (EIO);
			}
			drv_usecwait(10000);
		}

		if (un->un_partial_dma_supported == 1) {
			/*
			 * save the resid from PARTIAL_DMA
			 */
			dma_resid = wr_pktp->pkt_resid;
			if (dma_resid != 0)
				nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
			wr_pktp->pkt_resid = 0;
		} else {
			dma_resid = 0;
		}

		/* SunBug 1222170 */
		wr_pktp->pkt_flags = FLAG_NOINTR;

		err = EIO;
		for (i = 0; i < SD_NDUMP_RETRIES; i++) {

			/*
			 * Scsi_poll returns 0 (success) if the command
			 * completes and the status block is STATUS_GOOD.
			 * We should only check errors if this condition is
			 * not true.  Even then we should send our own request
			 * sense packet only if we have a check condition and
			 * auto request sense has not been performed by the
			 * hba.
			 */
			SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");

			if ((sd_scsi_poll(un, wr_pktp) == 0) &&
			    (wr_pktp->pkt_resid == 0)) {
				err = SD_SUCCESS;
				break;
			}

			/*
			 * Check CMD_DEV_GONE 1st, give up if device is gone.
			 */
			if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "Error while dumping state..."
				    "Device is gone\n");
				break;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with CHECK, "
				    "try # %d\n", i);
				if (((wr_pktp->pkt_state &
				    STATE_ARQ_DONE) == 0)) {
					(void) sd_send_polled_RQS(un);
				}

				continue;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
				int reset_retval = 0;

				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with BUSY, "
				    "try # %d\n", i);

				/* Escalate: LUN reset, then target reset. */
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval = scsi_reset(
					    SD_ADDRESS(un), RESET_LUN);
				}
				if (reset_retval == 0) {
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
				(void) sd_send_polled_RQS(un);

			} else {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with 0x%x, "
				    "try # %d\n",
				    SD_GET_PKT_STATUS(wr_pktp), i);
				mutex_enter(SD_MUTEX(un));
				sd_reset_target(un, wr_pktp);
				mutex_exit(SD_MUTEX(un));
			}

			/*
			 * If we are not getting anywhere with lun/target
			 * resets, let's reset the bus.
			 */
			if (i == SD_NDUMP_RETRIES/2) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
				(void) sd_send_polled_RQS(un);
			}
		}
	}

	scsi_destroy_pkt(wr_pktp);
	mutex_enter(SD_MUTEX(un));
	/* Free the RMW staging buffer if one was allocated above. */
	if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
		mutex_exit(SD_MUTEX(un));
		scsi_free_consistent_buf(wr_bp);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
	SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
	return (err);
}
26213 26211
26214 26212 /*
26215 26213 * Function: sd_scsi_poll()
26216 26214 *
26217 26215 * Description: This is a wrapper for the scsi_poll call.
26218 26216 *
26219 26217 * Arguments: sd_lun - The unit structure
26220 26218 * scsi_pkt - The scsi packet being sent to the device.
26221 26219 *
26222 26220 * Return Code: 0 - Command completed successfully with good status
26223 26221 * -1 - Command failed. This could indicate a check condition
26224 26222 * or other status value requiring recovery action.
26225 26223 *
26226 26224 * NOTE: This code is only called off sddump().
26227 26225 */
26228 26226
26229 26227 static int
26230 26228 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
26231 26229 {
26232 26230 int status;
26233 26231
26234 26232 ASSERT(un != NULL);
26235 26233 ASSERT(!mutex_owned(SD_MUTEX(un)));
26236 26234 ASSERT(pktp != NULL);
26237 26235
26238 26236 status = SD_SUCCESS;
26239 26237
26240 26238 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
26241 26239 pktp->pkt_flags |= un->un_tagflags;
26242 26240 pktp->pkt_flags &= ~FLAG_NODISCON;
26243 26241 }
26244 26242
26245 26243 status = sd_ddi_scsi_poll(pktp);
26246 26244 /*
26247 26245 * Scsi_poll returns 0 (success) if the command completes and the
26248 26246 * status block is STATUS_GOOD. We should only check errors if this
26249 26247 * condition is not true. Even then we should send our own request
26250 26248 * sense packet only if we have a check condition and auto
26251 26249 * request sense has not been performed by the hba.
26252 26250 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
26253 26251 */
26254 26252 if ((status != SD_SUCCESS) &&
26255 26253 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
26256 26254 (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
26257 26255 (pktp->pkt_reason != CMD_DEV_GONE))
26258 26256 (void) sd_send_polled_RQS(un);
26259 26257
26260 26258 return (status);
26261 26259 }
26262 26260
26263 26261 /*
26264 26262 * Function: sd_send_polled_RQS()
26265 26263 *
26266 26264 * Description: This sends the request sense command to a device.
26267 26265 *
26268 26266 * Arguments: sd_lun - The unit structure
26269 26267 *
26270 26268 * Return Code: 0 - Command completed successfully with good status
26271 26269 * -1 - Command failed.
26272 26270 *
26273 26271 */
26274 26272
26275 26273 static int
26276 26274 sd_send_polled_RQS(struct sd_lun *un)
26277 26275 {
26278 26276 int ret_val;
26279 26277 struct scsi_pkt *rqs_pktp;
26280 26278 struct buf *rqs_bp;
26281 26279
26282 26280 ASSERT(un != NULL);
26283 26281 ASSERT(!mutex_owned(SD_MUTEX(un)));
26284 26282
26285 26283 ret_val = SD_SUCCESS;
26286 26284
26287 26285 rqs_pktp = un->un_rqs_pktp;
26288 26286 rqs_bp = un->un_rqs_bp;
26289 26287
26290 26288 mutex_enter(SD_MUTEX(un));
26291 26289
26292 26290 if (un->un_sense_isbusy) {
26293 26291 ret_val = SD_FAILURE;
26294 26292 mutex_exit(SD_MUTEX(un));
26295 26293 return (ret_val);
26296 26294 }
26297 26295
26298 26296 /*
26299 26297 * If the request sense buffer (and packet) is not in use,
26300 26298 * let's set the un_sense_isbusy and send our packet
26301 26299 */
26302 26300 un->un_sense_isbusy = 1;
26303 26301 rqs_pktp->pkt_resid = 0;
26304 26302 rqs_pktp->pkt_reason = 0;
26305 26303 rqs_pktp->pkt_flags |= FLAG_NOINTR;
26306 26304 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);
26307 26305
26308 26306 mutex_exit(SD_MUTEX(un));
26309 26307
26310 26308 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
26311 26309 " 0x%p\n", rqs_bp->b_un.b_addr);
26312 26310
26313 26311 /*
26314 26312 * Can't send this to sd_scsi_poll, we wrap ourselves around the
26315 26313 * axle - it has a call into us!
26316 26314 */
26317 26315 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
26318 26316 SD_INFO(SD_LOG_COMMON, un,
26319 26317 "sd_send_polled_RQS: RQS failed\n");
26320 26318 }
26321 26319
26322 26320 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
26323 26321 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);
26324 26322
26325 26323 mutex_enter(SD_MUTEX(un));
26326 26324 un->un_sense_isbusy = 0;
26327 26325 mutex_exit(SD_MUTEX(un));
26328 26326
26329 26327 return (ret_val);
26330 26328 }
26331 26329
26332 26330 /*
26333 26331 * Defines needed for localized version of the scsi_poll routine.
26334 26332 */
26335 26333 #define CSEC 10000 /* usecs */
26336 26334 #define SEC_TO_CSEC (1000000/CSEC)
26337 26335
26338 26336 /*
26339 26337 * Function: sd_ddi_scsi_poll()
26340 26338 *
26341 26339 * Description: Localized version of the scsi_poll routine. The purpose is to
26342 26340 * send a scsi_pkt to a device as a polled command. This version
26343 26341 * is to ensure more robust handling of transport errors.
26344 26342 * Specifically this routine cures not ready, coming ready
26345 26343 * transition for power up and reset of sonoma's. This can take
26346 26344 * up to 45 seconds for power-on and 20 seconds for reset of a
26347 26345 * sonoma lun.
26348 26346 *
26349 26347 * Arguments: scsi_pkt - The scsi_pkt being sent to a device
26350 26348 *
26351 26349 * Return Code: 0 - Command completed successfully with good status
26352 26350 * -1 - Command failed.
26353 26351 *
26354 26352 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can
26355 26353 * be fixed (removing this code), we need to determine how to handle the
26356 26354 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
26357 26355 *
26358 26356 * NOTE: This code is only called off sddump().
26359 26357 */
static int
sd_ddi_scsi_poll(struct scsi_pkt *pkt)
{
	int rval = -1;
	int savef;
	long savet;
	void (*savec)();
	int timeout;
	int busy_count;
	int poll_delay;
	int rc;
	uint8_t *sensep;
	struct scsi_arq_status *arqstat;
	extern int do_polled_io;

	ASSERT(pkt->pkt_scbp);

	/*
	 * save old flags in pkt, to restore at end
	 */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	pkt->pkt_flags |= FLAG_NOINTR;

	/*
	 * XXX there is nothing in the SCSA spec that states that we should not
	 * do a callback for polled cmds; however, removing this will break sd
	 * and probably other target drivers
	 */
	pkt->pkt_comp = NULL;

	/*
	 * we don't like a polled command without timeout.
	 * 60 seconds seems long enough.
	 */
	if (pkt->pkt_time == 0)
		pkt->pkt_time = SCSI_POLL_TIMEOUT;

	/*
	 * Send polled cmd.
	 *
	 * We do some error recovery for various errors.  Tran_busy,
	 * queue full, and non-dispatched commands are retried every 10 msec.
	 * as they are typically transient failures.  Busy status and Not
	 * Ready are retried every second as this status takes a while to
	 * change.
	 */
	/* Total budget, counted in CSEC (10 msec) units. */
	timeout = pkt->pkt_time * SEC_TO_CSEC;

	for (busy_count = 0; busy_count < timeout; busy_count++) {
		/*
		 * Initialize pkt status variables.
		 */
		*pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;

		if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
			if (rc != TRAN_BUSY) {
				/* Transport failed - give up. */
				break;
			} else {
				/* Transport busy - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */
			}
		} else {
			/*
			 * Transport accepted - check pkt status.
			 */
			rc = (*pkt->pkt_scbp) & STATUS_MASK;
			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				/* Auto request sense data is available. */
				arqstat =
				    (struct scsi_arq_status *)(pkt->pkt_scbp);
				sensep = (uint8_t *)&arqstat->sts_sensedata;
			} else {
				sensep = NULL;
			}

			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_GOOD)) {
				/* No error - we're done */
				rval = 0;
				break;

			} else if (pkt->pkt_reason == CMD_DEV_GONE) {
				/* Lost connection - give up */
				break;

			} else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
			    (pkt->pkt_state == 0)) {
				/* Pkt not dispatched - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_QFULL)) {
				/* Queue full - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_BUSY)) {
				/*
				 * Busy - try again.  Charge a full second
				 * against the budget (busy_count advances
				 * by SEC_TO_CSEC including the loop's own
				 * increment).
				 */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
				/*
				 * Unit Attention - try again.
				 * Pretend it took 1 sec.
				 * NOTE: 'continue' avoids poll_delay
				 */
				busy_count += (SEC_TO_CSEC - 1);
				continue;

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_NOT_READY) &&
			    (scsi_sense_asc(sensep) == 0x04) &&
			    (scsi_sense_ascq(sensep) == 0x01)) {
				/*
				 * Not ready -> ready - try again.
				 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
				 * ...same as STATUS_BUSY
				 */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else {
				/* BAD status - give up. */
				break;
			}
		}

		/*
		 * Interrupt threads and the polled-I/O dump path must not
		 * block, so busy-wait there; otherwise sleep via delay().
		 */
		if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
		    !do_polled_io) {
			delay(drv_usectohz(poll_delay));
		} else {
			/* we busy wait during cpr_dump or interrupt threads */
			drv_usecwait(poll_delay);
		}
	}

	/* Restore the caller's packet settings before returning. */
	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;

	/* return on error */
	if (rval)
		return (rval);

	/*
	 * This is not a performance critical code path.
	 *
	 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
	 * issues associated with looking at DMA memory prior to
	 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
	 */
	scsi_sync_pkt(pkt);
	return (0);
}
26521 26519
26522 26520
26523 26521
26524 26522 /*
26525 26523 * Function: sd_persistent_reservation_in_read_keys
26526 26524 *
26527 26525 * Description: This routine is the driver entry point for handling CD-ROM
26528 26526 * multi-host persistent reservation requests (MHIOCGRP_INKEYS)
26529 26527 * by sending the SCSI-3 PRIN commands to the device.
26530 26528 * Processes the read keys command response by copying the
26531 26529 * reservation key information into the user provided buffer.
26532 26530 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
26533 26531 *
26534 26532 * Arguments: un - Pointer to soft state struct for the target.
26535 26533 * usrp - user provided pointer to multihost Persistent In Read
26536 26534 * Keys structure (mhioc_inkeys_t)
26537 26535 * flag - this argument is a pass through to ddi_copyxxx()
26538 26536 * directly from the mode argument of ioctl().
26539 26537 *
26540 26538 * Return Code: 0 - Success
26541 26539 * EACCES
26542 26540 * ENOTSUP
26543 26541 * errno return code from sd_send_scsi_cmd()
26544 26542 *
26545 26543 * Context: Can sleep. Does not return until command is completed.
26546 26544 */
26547 26545
static int
sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_key_list32	li32;
#endif
	sd_prin_readkeys_t	*in;
	mhioc_inkeys_t		*ptr;
	mhioc_key_list_t	li;
	uchar_t			*data_bufp;
	int			data_len;
	int			rval = 0;
	size_t			copysz;
	sd_ssc_t		*ssc;

	/* Reject a NULL request structure outright. */
	if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) {
		return (EINVAL);
	}
	/* Zero so fields not filled by the ILP32 path below are defined. */
	bzero(&li, sizeof (mhioc_key_list_t));

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		/*
		 * 32-bit caller: copy in the 32-bit key list header and
		 * widen it to the native mhioc_key_list_t form.
		 */
		copysz = sizeof (struct mhioc_key_list32);
		if (ddi_copyin(ptr->li, &li32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		li.listsize = li32.listsize;
		li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_key_list_t);
		if (ddi_copyin(ptr->li, &li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_key_list_t);
	if (ddi_copyin(ptr->li, &li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyin: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif

	/*
	 * Buffer holds listsize keys plus the PRIN response header; the
	 * caddr_t keylist placeholder in sd_prin_readkeys_t is subtracted
	 * since the key data itself occupies that space.
	 * NOTE(review): listsize comes from userland and is used unchecked
	 * here — presumably bounded by the caller; verify against the
	 * ioctl entry path.
	 */
	data_len  = li.listsize * MHIOC_RESV_KEY_SIZE;
	data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
	    data_len, data_bufp);
	if (rval != 0) {
		/* Only a transport/device EIO compromises FMA assessment. */
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readkeys_t *)data_bufp;
	/* PRIN data is SCSI big-endian on the wire; convert to host order. */
	ptr->generation = BE_32(in->generation);
	li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		/* copysz still holds the model-specific size from copyin */
		li32.listlen = li.listlen;
		if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&li, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyout(&li, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* _MULTI_DATAMODEL */

	/* Copy out only as many keys as both sides can accommodate. */
	copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
	    li.listsize * MHIOC_RESV_KEY_SIZE);
	if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: keylist\n");
		rval = EFAULT;
	}
done:
	sd_ssc_fini(ssc);
	kmem_free(data_bufp, data_len);
	return (rval);
}
26683 26681
26684 26682
26685 26683 /*
26686 26684 * Function: sd_persistent_reservation_in_read_resv
26687 26685 *
26688 26686 * Description: This routine is the driver entry point for handling CD-ROM
26689 26687 * multi-host persistent reservation requests (MHIOCGRP_INRESV)
26690 26688 * by sending the SCSI-3 PRIN commands to the device.
26691 26689 * Process the read persistent reservations command response by
26692 26690 * copying the reservation information into the user provided
26693 26691 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
26694 26692 *
26695 26693 * Arguments: un - Pointer to soft state struct for the target.
 *		usrp  - user provided pointer to multihost Persistent In Read
 *			Resvs structure (mhioc_inresvs_t)
26698 26696 * flag - this argument is a pass through to ddi_copyxxx()
26699 26697 * directly from the mode argument of ioctl().
26700 26698 *
26701 26699 * Return Code: 0 - Success
26702 26700 * EACCES
26703 26701 * ENOTSUP
26704 26702 * errno return code from sd_send_scsi_cmd()
26705 26703 *
26706 26704 * Context: Can sleep. Does not return until command is completed.
26707 26705 */
26708 26706
static int
sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_resv_desc_list32 resvlist32;
#endif
	sd_prin_readresv_t	*in;
	mhioc_inresvs_t		*ptr;
	sd_readresv_desc_t	*readresv_ptr;
	mhioc_resv_desc_list_t	resvlist;
	mhioc_resv_desc_t	resvdesc;
	uchar_t			*data_bufp = NULL;
	int			data_len;
	int			rval = 0;
	int			i;
	size_t			copysz;
	mhioc_resv_desc_t	*bufp;
	sd_ssc_t		*ssc;

	/* Reject a NULL request structure outright. */
	if ((ptr = usrp) == NULL) {
		return (EINVAL);
	}

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		/*
		 * 32-bit caller: copy in the 32-bit descriptor list header
		 * and widen it to the native mhioc_resv_desc_list_t form.
		 */
		copysz = sizeof (struct mhioc_resv_desc_list32);
		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		resvlist.listsize = resvlist32.listsize;
		resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_resv_desc_list_t);
		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_resv_desc_list_t);
	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif /* ! _MULTI_DATAMODEL */

	/*
	 * Buffer holds listsize reservation descriptors plus the PRIN
	 * response header; the caddr_t placeholder in sd_prin_readresv_t
	 * is subtracted since the descriptor data occupies that space.
	 * NOTE(review): listsize comes from userland and is used unchecked
	 * here — presumably bounded by the caller; verify against the
	 * ioctl entry path.
	 */
	data_len  = resvlist.listsize * SCSI3_RESV_DESC_LEN;
	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
	    data_len, data_bufp);
	if (rval != 0) {
		/* Only a transport/device EIO compromises FMA assessment. */
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readresv_t *)data_bufp;
	/* PRIN data is SCSI big-endian on the wire; convert to host order. */
	ptr->generation = BE_32(in->generation);
	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		/* copysz still holds the model-specific size from copyin */
		resvlist32.listlen = resvlist.listlen;
		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* ! _MULTI_DATAMODEL */

	/*
	 * Convert each on-the-wire descriptor to the mhioc form and copy
	 * it out individually; only min(listlen, listsize) are returned.
	 */
	readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
	bufp = resvlist.list;
	copysz = sizeof (mhioc_resv_desc_t);
	for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
	    i++, readresv_ptr++, bufp++) {

		bcopy(&readresv_ptr->resvkey, &resvdesc.key,
		    MHIOC_RESV_KEY_SIZE);
		resvdesc.type  = readresv_ptr->type;
		resvdesc.scope = readresv_ptr->scope;
		resvdesc.scope_specific_addr =
		    BE_32(readresv_ptr->scope_specific_addr);

		if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: resvlist\n");
			rval = EFAULT;
			goto done;
		}
	}
done:
	sd_ssc_fini(ssc);
	/* only if data_bufp is allocated, we need to free it */
	if (data_bufp) {
		kmem_free(data_bufp, data_len);
	}
	return (rval);
}
26861 26859
26862 26860
26863 26861 /*
26864 26862 * Function: sr_change_blkmode()
26865 26863 *
26866 26864 * Description: This routine is the driver entry point for handling CD-ROM
26867 26865 * block mode ioctl requests. Support for returning and changing
26868 26866 * the current block size in use by the device is implemented. The
26869 26867 * LBA size is changed via a MODE SELECT Block Descriptor.
26870 26868 *
26871 26869 * This routine issues a mode sense with an allocation length of
26872 26870 * 12 bytes for the mode page header and a single block descriptor.
26873 26871 *
26874 26872 * Arguments: dev - the device 'dev_t'
26875 26873 * cmd - the request type; one of CDROMGBLKMODE (get) or
26876 26874 * CDROMSBLKMODE (set)
26877 26875 * data - current block size or requested block size
26878 26876 * flag - this argument is a pass through to ddi_copyxxx() directly
26879 26877 * from the mode argument of ioctl().
26880 26878 *
26881 26879 * Return Code: the code returned by sd_send_scsi_cmd()
26882 26880 * EINVAL if invalid arguments are provided
26883 26881 * EFAULT if ddi_copyxxx() fails
26884 26882 * ENXIO if fail ddi_get_soft_state
26885 26883 * EIO if invalid mode sense block descriptor length
26886 26884 *
26887 26885 */
26888 26886
static int
sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un = NULL;
	struct mode_header		*sense_mhp, *select_mhp;
	struct block_descriptor		*sense_desc, *select_desc;
	int				current_bsize;
	int				rval = EINVAL;
	uchar_t				*sense = NULL;
	uchar_t				*select = NULL;
	sd_ssc_t			*ssc;

	ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * The block length is changed via the Mode Select block descriptor, the
	 * "Read/Write Error Recovery" mode page (0x1) contents are not actually
	 * required as part of this routine. Therefore the mode sense allocation
	 * length is specified to be the length of a mode page header and a
	 * block descriptor.
	 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	/* Assemble the current 24-bit LBA size from the block descriptor. */
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi  = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo  = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
			 */
			select_desc->blksize_hi  = sense_desc->blksize_hi;
			select_desc->blksize_mid = sense_desc->blksize_mid;
			select_desc->blksize_lo  = sense_desc->blksize_lo;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
			    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
			    SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		} else {
			/* Record the new block size in the soft state. */
			ASSERT(!mutex_owned(SD_MUTEX(un)));
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, (uint32_t)data, 0);
			mutex_exit(SD_MUTEX(un));
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	/* kmem_free() must not be passed NULL, hence the guards. */
	if (select) {
		kmem_free(select, BUFLEN_CHG_BLK_MODE);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
	}
	return (rval);
}
27040 27038
27041 27039
27042 27040 /*
27043 27041 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
27044 27042 * implement driver support for getting and setting the CD speed. The command
27045 27043 * set used will be based on the device type. If the device has not been
27046 27044 * identified as MMC the Toshiba vendor specific mode page will be used. If
27047 27045 * the device is MMC but does not support the Real Time Streaming feature
27048 27046 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
27049 27047 * be used to read the speed.
27050 27048 */
27051 27049
27052 27050 /*
27053 27051 * Function: sr_change_speed()
27054 27052 *
27055 27053 * Description: This routine is the driver entry point for handling CD-ROM
27056 27054 * drive speed ioctl requests for devices supporting the Toshiba
27057 27055 * vendor specific drive speed mode page. Support for returning
27058 27056 * and changing the current drive speed in use by the device is
27059 27057 * implemented.
27060 27058 *
27061 27059 * Arguments: dev - the device 'dev_t'
27062 27060 * cmd - the request type; one of CDROMGDRVSPEED (get) or
27063 27061 * CDROMSDRVSPEED (set)
27064 27062 * data - current drive speed or requested drive speed
27065 27063 * flag - this argument is a pass through to ddi_copyxxx() directly
27066 27064 * from the mode argument of ioctl().
27067 27065 *
27068 27066 * Return Code: the code returned by sd_send_scsi_cmd()
27069 27067 * EINVAL if invalid arguments are provided
27070 27068 * EFAULT if ddi_copyxxx() fails
27071 27069 * ENXIO if fail ddi_get_soft_state
27072 27070 * EIO if invalid mode sense block descriptor length
27073 27071 */
27074 27072
27075 27073 static int
27076 27074 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27077 27075 {
27078 27076 struct sd_lun *un = NULL;
27079 27077 struct mode_header *sense_mhp, *select_mhp;
27080 27078 struct mode_speed *sense_page, *select_page;
27081 27079 int current_speed;
27082 27080 int rval = EINVAL;
27083 27081 int bd_len;
27084 27082 uchar_t *sense = NULL;
27085 27083 uchar_t *select = NULL;
27086 27084 sd_ssc_t *ssc;
27087 27085
27088 27086 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27089 27087 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27090 27088 return (ENXIO);
27091 27089 }
27092 27090
27093 27091 /*
27094 27092 * Note: The drive speed is being modified here according to a Toshiba
27095 27093 * vendor specific mode page (0x31).
27096 27094 */
27097 27095 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27098 27096
27099 27097 ssc = sd_ssc_init(un);
27100 27098 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
27101 27099 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
27102 27100 SD_PATH_STANDARD);
27103 27101 sd_ssc_fini(ssc);
27104 27102 if (rval != 0) {
27105 27103 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27106 27104 "sr_change_speed: Mode Sense Failed\n");
27107 27105 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27108 27106 return (rval);
27109 27107 }
27110 27108 sense_mhp = (struct mode_header *)sense;
27111 27109
27112 27110 /* Check the block descriptor len to handle only 1 block descriptor */
27113 27111 bd_len = sense_mhp->bdesc_length;
27114 27112 if (bd_len > MODE_BLK_DESC_LENGTH) {
27115 27113 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27116 27114 "sr_change_speed: Mode Sense returned invalid block "
27117 27115 "descriptor length\n");
27118 27116 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27119 27117 return (EIO);
27120 27118 }
27121 27119
27122 27120 sense_page = (struct mode_speed *)
27123 27121 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
27124 27122 current_speed = sense_page->speed;
27125 27123
27126 27124 /* Process command */
27127 27125 switch (cmd) {
27128 27126 case CDROMGDRVSPEED:
27129 27127 /* Return the drive speed obtained during the mode sense */
27130 27128 if (current_speed == 0x2) {
27131 27129 current_speed = CDROM_TWELVE_SPEED;
27132 27130 }
27133 27131 if (ddi_copyout(¤t_speed, (void *)data,
27134 27132 sizeof (int), flag) != 0) {
27135 27133 rval = EFAULT;
27136 27134 }
27137 27135 break;
27138 27136 case CDROMSDRVSPEED:
27139 27137 /* Validate the requested drive speed */
27140 27138 switch ((uchar_t)data) {
27141 27139 case CDROM_TWELVE_SPEED:
27142 27140 data = 0x2;
27143 27141 /*FALLTHROUGH*/
27144 27142 case CDROM_NORMAL_SPEED:
27145 27143 case CDROM_DOUBLE_SPEED:
27146 27144 case CDROM_QUAD_SPEED:
27147 27145 case CDROM_MAXIMUM_SPEED:
27148 27146 break;
27149 27147 default:
27150 27148 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27151 27149 "sr_change_speed: "
27152 27150 "Drive Speed '%d' Not Supported\n", (uchar_t)data);
27153 27151 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27154 27152 return (EINVAL);
27155 27153 }
27156 27154
27157 27155 /*
27158 27156 * The current drive speed matches the requested drive speed so
27159 27157 * there is no need to send the mode select to change the speed
27160 27158 */
27161 27159 if (current_speed == data) {
27162 27160 break;
27163 27161 }
27164 27162
27165 27163 /* Build the select data for the requested drive speed */
27166 27164 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27167 27165 select_mhp = (struct mode_header *)select;
27168 27166 select_mhp->bdesc_length = 0;
27169 27167 select_page =
27170 27168 (struct mode_speed *)(select + MODE_HEADER_LENGTH);
27171 27169 select_page =
27172 27170 (struct mode_speed *)(select + MODE_HEADER_LENGTH);
27173 27171 select_page->mode_page.code = CDROM_MODE_SPEED;
27174 27172 select_page->mode_page.length = 2;
27175 27173 select_page->speed = (uchar_t)data;
27176 27174
27177 27175 /* Send the mode select for the requested block size */
27178 27176 ssc = sd_ssc_init(un);
27179 27177 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27180 27178 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27181 27179 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27182 27180 sd_ssc_fini(ssc);
27183 27181 if (rval != 0) {
27184 27182 /*
27185 27183 * The mode select failed for the requested drive speed,
27186 27184 * so reset the data for the original drive speed and
27187 27185 * send it to the target. The error is indicated by the
27188 27186 * return value for the failed mode select.
27189 27187 */
27190 27188 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27191 27189 "sr_drive_speed: Mode Select Failed\n");
27192 27190 select_page->speed = sense_page->speed;
27193 27191 ssc = sd_ssc_init(un);
27194 27192 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27195 27193 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27196 27194 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27197 27195 sd_ssc_fini(ssc);
27198 27196 }
27199 27197 break;
27200 27198 default:
27201 27199 /* should not reach here, but check anyway */
27202 27200 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27203 27201 "sr_change_speed: Command '%x' Not Supported\n", cmd);
27204 27202 rval = EINVAL;
27205 27203 break;
27206 27204 }
27207 27205
27208 27206 if (select) {
27209 27207 kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
27210 27208 }
27211 27209 if (sense) {
27212 27210 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27213 27211 }
27214 27212
27215 27213 return (rval);
27216 27214 }
27217 27215
27218 27216
27219 27217 /*
27220 27218 * Function: sr_atapi_change_speed()
27221 27219 *
27222 27220 * Description: This routine is the driver entry point for handling CD-ROM
27223 27221 * drive speed ioctl requests for MMC devices that do not support
27224 27222 * the Real Time Streaming feature (0x107).
27225 27223 *
27226 27224 * Note: This routine will use the SET SPEED command which may not
27227 27225 * be supported by all devices.
27228 27226 *
27229 27227 * Arguments: dev- the device 'dev_t'
27230 27228 * cmd- the request type; one of CDROMGDRVSPEED (get) or
27231 27229 * CDROMSDRVSPEED (set)
27232 27230 * data- current drive speed or requested drive speed
27233 27231 * flag- this argument is a pass through to ddi_copyxxx() directly
27234 27232 * from the mode argument of ioctl().
27235 27233 *
27236 27234 * Return Code: the code returned by sd_send_scsi_cmd()
27237 27235 * EINVAL if invalid arguments are provided
27238 27236 * EFAULT if ddi_copyxxx() fails
27239 27237 * ENXIO if fail ddi_get_soft_state
27240 27238 * EIO if invalid mode sense block descriptor length
27241 27239 */
27242 27240
27243 27241 static int
27244 27242 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27245 27243 {
27246 27244 struct sd_lun *un;
27247 27245 struct uscsi_cmd *com = NULL;
27248 27246 struct mode_header_grp2 *sense_mhp;
27249 27247 uchar_t *sense_page;
27250 27248 uchar_t *sense = NULL;
27251 27249 char cdb[CDB_GROUP5];
27252 27250 int bd_len;
27253 27251 int current_speed = 0;
27254 27252 int max_speed = 0;
27255 27253 int rval;
27256 27254 sd_ssc_t *ssc;
27257 27255
27258 27256 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27259 27257
27260 27258 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27261 27259 return (ENXIO);
27262 27260 }
27263 27261
27264 27262 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
27265 27263
27266 27264 ssc = sd_ssc_init(un);
27267 27265 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
27268 27266 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
27269 27267 SD_PATH_STANDARD);
27270 27268 sd_ssc_fini(ssc);
27271 27269 if (rval != 0) {
27272 27270 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27273 27271 "sr_atapi_change_speed: Mode Sense Failed\n");
27274 27272 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27275 27273 return (rval);
27276 27274 }
27277 27275
27278 27276 /* Check the block descriptor len to handle only 1 block descriptor */
27279 27277 sense_mhp = (struct mode_header_grp2 *)sense;
27280 27278 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
27281 27279 if (bd_len > MODE_BLK_DESC_LENGTH) {
27282 27280 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27283 27281 "sr_atapi_change_speed: Mode Sense returned invalid "
27284 27282 "block descriptor length\n");
27285 27283 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27286 27284 return (EIO);
27287 27285 }
27288 27286
27289 27287 /* Calculate the current and maximum drive speeds */
27290 27288 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
27291 27289 current_speed = (sense_page[14] << 8) | sense_page[15];
27292 27290 max_speed = (sense_page[8] << 8) | sense_page[9];
27293 27291
27294 27292 /* Process the command */
27295 27293 switch (cmd) {
27296 27294 case CDROMGDRVSPEED:
27297 27295 current_speed /= SD_SPEED_1X;
27298 27296 if (ddi_copyout(¤t_speed, (void *)data,
27299 27297 sizeof (int), flag) != 0)
27300 27298 rval = EFAULT;
27301 27299 break;
27302 27300 case CDROMSDRVSPEED:
27303 27301 /* Convert the speed code to KB/sec */
27304 27302 switch ((uchar_t)data) {
27305 27303 case CDROM_NORMAL_SPEED:
27306 27304 current_speed = SD_SPEED_1X;
27307 27305 break;
27308 27306 case CDROM_DOUBLE_SPEED:
27309 27307 current_speed = 2 * SD_SPEED_1X;
27310 27308 break;
27311 27309 case CDROM_QUAD_SPEED:
27312 27310 current_speed = 4 * SD_SPEED_1X;
27313 27311 break;
27314 27312 case CDROM_TWELVE_SPEED:
27315 27313 current_speed = 12 * SD_SPEED_1X;
27316 27314 break;
27317 27315 case CDROM_MAXIMUM_SPEED:
27318 27316 current_speed = 0xffff;
27319 27317 break;
27320 27318 default:
27321 27319 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27322 27320 "sr_atapi_change_speed: invalid drive speed %d\n",
27323 27321 (uchar_t)data);
27324 27322 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27325 27323 return (EINVAL);
27326 27324 }
27327 27325
27328 27326 /* Check the request against the drive's max speed. */
27329 27327 if (current_speed != 0xffff) {
27330 27328 if (current_speed > max_speed) {
27331 27329 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27332 27330 return (EINVAL);
27333 27331 }
27334 27332 }
27335 27333
27336 27334 /*
27337 27335 * Build and send the SET SPEED command
27338 27336 *
27339 27337 * Note: The SET SPEED (0xBB) command used in this routine is
27340 27338 * obsolete per the SCSI MMC spec but still supported in the
27341 27339 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI
27342 27340 * therefore the command is still implemented in this routine.
27343 27341 */
27344 27342 bzero(cdb, sizeof (cdb));
27345 27343 cdb[0] = (char)SCMD_SET_CDROM_SPEED;
27346 27344 cdb[2] = (uchar_t)(current_speed >> 8);
27347 27345 cdb[3] = (uchar_t)current_speed;
27348 27346 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27349 27347 com->uscsi_cdb = (caddr_t)cdb;
27350 27348 com->uscsi_cdblen = CDB_GROUP5;
27351 27349 com->uscsi_bufaddr = NULL;
27352 27350 com->uscsi_buflen = 0;
27353 27351 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27354 27352 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
27355 27353 break;
27356 27354 default:
27357 27355 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27358 27356 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
27359 27357 rval = EINVAL;
27360 27358 }
27361 27359
27362 27360 if (sense) {
27363 27361 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27364 27362 }
27365 27363 if (com) {
27366 27364 kmem_free(com, sizeof (*com));
27367 27365 }
27368 27366 return (rval);
27369 27367 }
27370 27368
27371 27369
27372 27370 /*
27373 27371 * Function: sr_pause_resume()
27374 27372 *
27375 27373 * Description: This routine is the driver entry point for handling CD-ROM
27376 27374 * pause/resume ioctl requests. This only affects the audio play
27377 27375 * operation.
27378 27376 *
27379 27377 * Arguments: dev - the device 'dev_t'
27380 27378 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
27381 27379 * for setting the resume bit of the cdb.
27382 27380 *
27383 27381 * Return Code: the code returned by sd_send_scsi_cmd()
27384 27382 * EINVAL if invalid mode specified
27385 27383 *
27386 27384 */
27387 27385
27388 27386 static int
27389 27387 sr_pause_resume(dev_t dev, int cmd)
27390 27388 {
27391 27389 struct sd_lun *un;
27392 27390 struct uscsi_cmd *com;
27393 27391 char cdb[CDB_GROUP1];
27394 27392 int rval;
27395 27393
27396 27394 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27397 27395 return (ENXIO);
27398 27396 }
27399 27397
27400 27398 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27401 27399 bzero(cdb, CDB_GROUP1);
27402 27400 cdb[0] = SCMD_PAUSE_RESUME;
27403 27401 switch (cmd) {
27404 27402 case CDROMRESUME:
27405 27403 cdb[8] = 1;
27406 27404 break;
27407 27405 case CDROMPAUSE:
27408 27406 cdb[8] = 0;
27409 27407 break;
27410 27408 default:
27411 27409 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
27412 27410 " Command '%x' Not Supported\n", cmd);
27413 27411 rval = EINVAL;
27414 27412 goto done;
27415 27413 }
27416 27414
27417 27415 com->uscsi_cdb = cdb;
27418 27416 com->uscsi_cdblen = CDB_GROUP1;
27419 27417 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27420 27418
27421 27419 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27422 27420 SD_PATH_STANDARD);
27423 27421
27424 27422 done:
27425 27423 kmem_free(com, sizeof (*com));
27426 27424 return (rval);
27427 27425 }
27428 27426
27429 27427
27430 27428 /*
27431 27429 * Function: sr_play_msf()
27432 27430 *
27433 27431 * Description: This routine is the driver entry point for handling CD-ROM
27434 27432 * ioctl requests to output the audio signals at the specified
27435 27433 * starting address and continue the audio play until the specified
27436 27434 * ending address (CDROMPLAYMSF) The address is in Minute Second
27437 27435 * Frame (MSF) format.
27438 27436 *
27439 27437 * Arguments: dev - the device 'dev_t'
27440 27438 * data - pointer to user provided audio msf structure,
27441 27439 * specifying start/end addresses.
27442 27440 * flag - this argument is a pass through to ddi_copyxxx()
27443 27441 * directly from the mode argument of ioctl().
27444 27442 *
27445 27443 * Return Code: the code returned by sd_send_scsi_cmd()
27446 27444 * EFAULT if ddi_copyxxx() fails
27447 27445 * ENXIO if fail ddi_get_soft_state
27448 27446 * EINVAL if data pointer is NULL
27449 27447 */
27450 27448
27451 27449 static int
27452 27450 sr_play_msf(dev_t dev, caddr_t data, int flag)
27453 27451 {
27454 27452 struct sd_lun *un;
27455 27453 struct uscsi_cmd *com;
27456 27454 struct cdrom_msf msf_struct;
27457 27455 struct cdrom_msf *msf = &msf_struct;
27458 27456 char cdb[CDB_GROUP1];
27459 27457 int rval;
27460 27458
27461 27459 if (data == NULL) {
27462 27460 return (EINVAL);
27463 27461 }
27464 27462
27465 27463 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27466 27464 return (ENXIO);
27467 27465 }
27468 27466
27469 27467 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) {
27470 27468 return (EFAULT);
27471 27469 }
27472 27470
27473 27471 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27474 27472 bzero(cdb, CDB_GROUP1);
27475 27473 cdb[0] = SCMD_PLAYAUDIO_MSF;
27476 27474 if (un->un_f_cfg_playmsf_bcd == TRUE) {
27477 27475 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0);
27478 27476 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0);
27479 27477 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0);
27480 27478 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1);
27481 27479 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1);
27482 27480 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1);
27483 27481 } else {
27484 27482 cdb[3] = msf->cdmsf_min0;
27485 27483 cdb[4] = msf->cdmsf_sec0;
27486 27484 cdb[5] = msf->cdmsf_frame0;
27487 27485 cdb[6] = msf->cdmsf_min1;
27488 27486 cdb[7] = msf->cdmsf_sec1;
27489 27487 cdb[8] = msf->cdmsf_frame1;
27490 27488 }
27491 27489 com->uscsi_cdb = cdb;
27492 27490 com->uscsi_cdblen = CDB_GROUP1;
27493 27491 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27494 27492 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27495 27493 SD_PATH_STANDARD);
27496 27494 kmem_free(com, sizeof (*com));
27497 27495 return (rval);
27498 27496 }
27499 27497
27500 27498
27501 27499 /*
27502 27500 * Function: sr_play_trkind()
27503 27501 *
27504 27502 * Description: This routine is the driver entry point for handling CD-ROM
27505 27503 * ioctl requests to output the audio signals at the specified
27506 27504 * starting address and continue the audio play until the specified
27507 27505 * ending address (CDROMPLAYTRKIND). The address is in Track Index
27508 27506 * format.
27509 27507 *
27510 27508 * Arguments: dev - the device 'dev_t'
27511 27509 * data - pointer to user provided audio track/index structure,
27512 27510 * specifying start/end addresses.
27513 27511 * flag - this argument is a pass through to ddi_copyxxx()
27514 27512 * directly from the mode argument of ioctl().
27515 27513 *
27516 27514 * Return Code: the code returned by sd_send_scsi_cmd()
27517 27515 * EFAULT if ddi_copyxxx() fails
27518 27516 * ENXIO if fail ddi_get_soft_state
27519 27517 * EINVAL if data pointer is NULL
27520 27518 */
27521 27519
27522 27520 static int
27523 27521 sr_play_trkind(dev_t dev, caddr_t data, int flag)
27524 27522 {
27525 27523 struct cdrom_ti ti_struct;
27526 27524 struct cdrom_ti *ti = &ti_struct;
27527 27525 struct uscsi_cmd *com = NULL;
27528 27526 char cdb[CDB_GROUP1];
27529 27527 int rval;
27530 27528
27531 27529 if (data == NULL) {
27532 27530 return (EINVAL);
27533 27531 }
27534 27532
27535 27533 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) {
27536 27534 return (EFAULT);
27537 27535 }
27538 27536
27539 27537 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27540 27538 bzero(cdb, CDB_GROUP1);
27541 27539 cdb[0] = SCMD_PLAYAUDIO_TI;
27542 27540 cdb[4] = ti->cdti_trk0;
27543 27541 cdb[5] = ti->cdti_ind0;
27544 27542 cdb[7] = ti->cdti_trk1;
27545 27543 cdb[8] = ti->cdti_ind1;
27546 27544 com->uscsi_cdb = cdb;
27547 27545 com->uscsi_cdblen = CDB_GROUP1;
27548 27546 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27549 27547 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27550 27548 SD_PATH_STANDARD);
27551 27549 kmem_free(com, sizeof (*com));
27552 27550 return (rval);
27553 27551 }
27554 27552
27555 27553
27556 27554 /*
27557 27555 * Function: sr_read_all_subcodes()
27558 27556 *
27559 27557 * Description: This routine is the driver entry point for handling CD-ROM
27560 27558 * ioctl requests to return raw subcode data while the target is
27561 27559 * playing audio (CDROMSUBCODE).
27562 27560 *
27563 27561 * Arguments: dev - the device 'dev_t'
27564 27562 * data - pointer to user provided cdrom subcode structure,
27565 27563 * specifying the transfer length and address.
27566 27564 * flag - this argument is a pass through to ddi_copyxxx()
27567 27565 * directly from the mode argument of ioctl().
27568 27566 *
27569 27567 * Return Code: the code returned by sd_send_scsi_cmd()
27570 27568 * EFAULT if ddi_copyxxx() fails
27571 27569 * ENXIO if fail ddi_get_soft_state
27572 27570 * EINVAL if data pointer is NULL
27573 27571 */
27574 27572
static int
sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com = NULL;
	struct cdrom_subcode	*subcode = NULL;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_subcode32		cdrom_subcode32;
	struct cdrom_subcode32		*cdsc32 = &cdrom_subcode32;
#endif
	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	/* Copy in the user request, converting from ILP32 if necessary */
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, subcode,
		    sizeof (struct cdrom_subcode), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	/* Transfer length is cdsc_length blocks of raw subcode data */
	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		/*
		 * MMC-capable drive: use READ CD (0xBE). Bytes 2-5 are
		 * set to 0xff (starting LBA 0xffffffff -- presumably a
		 * "from current location" convention; confirm against the
		 * MMC spec before relying on this), bytes 6-8 the transfer
		 * length, and byte 10 selects the sub-channel data format.
		 */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	/* cdsc_addr is a user-space buffer, hence UIO_USERSPACE below */
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}
27680 27678
27681 27679
27682 27680 /*
27683 27681 * Function: sr_read_subchannel()
27684 27682 *
27685 27683 * Description: This routine is the driver entry point for handling CD-ROM
27686 27684 * ioctl requests to return the Q sub-channel data of the CD
27687 27685 * current position block. (CDROMSUBCHNL) The data includes the
27688 27686 * track number, index number, absolute CD-ROM address (LBA or MSF
27689 27687 * format per the user) , track relative CD-ROM address (LBA or MSF
27690 27688 * format per the user), control data and audio status.
27691 27689 *
27692 27690 * Arguments: dev - the device 'dev_t'
27693 27691 * data - pointer to user provided cdrom sub-channel structure
27694 27692 * flag - this argument is a pass through to ddi_copyxxx()
27695 27693 * directly from the mode argument of ioctl().
27696 27694 *
27697 27695 * Return Code: the code returned by sd_send_scsi_cmd()
27698 27696 * EFAULT if ddi_copyxxx() fails
27699 27697 * ENXIO if fail ddi_get_soft_state
27700 27698 * EINVAL if data pointer is NULL
27701 27699 */
27702 27700
static int
sr_read_subchannel(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_subchnl	subchanel;
	struct cdrom_subchnl	*subchnl = &subchanel;
	char			cdb[CDB_GROUP1];
	caddr_t			buffer;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/* Copy in the caller's request; cdsc_format selects LBA vs. MSF */
	if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
		return (EFAULT);
	}

	/* 16-byte buffer for the Q sub-channel current-position response */
	buffer = kmem_zalloc((size_t)16, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_SUBCHANNEL;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
	/*
	 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
	 * returned
	 */
	cdb[2] = 0x40;
	/*
	 * Set byte 3 to specify the return data format. A value of 0x01
	 * indicates that the CD-ROM current position should be returned.
	 */
	cdb[3] = 0x01;
	/* Bytes 7-8: allocation length of 16 (0x10) bytes */
	cdb[8] = 0x10;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 16;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 16);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the returned Q sub-channel data */
	subchnl->cdsc_audiostatus = buffer[1];
	/* Byte 5: high nibble is ADR, low nibble is CONTROL */
	subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4;
	subchnl->cdsc_ctrl = (buffer[5] & 0x0F);
	subchnl->cdsc_trk = buffer[6];
	subchnl->cdsc_ind = buffer[7];
	if (subchnl->cdsc_format & CDROM_LBA) {
		/* Absolute (bytes 8-11) and relative (12-15) 32-bit LBAs */
		subchnl->cdsc_absaddr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		subchnl->cdsc_reladdr.lba =
		    ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
		    ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
	} else if (un->un_f_cfg_readsub_bcd == TRUE) {
		/* Drive reports the MSF fields in BCD; convert to binary */
		subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
		subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
		subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
		subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
		subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
		subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
	} else {
		subchnl->cdsc_absaddr.msf.minute = buffer[9];
		subchnl->cdsc_absaddr.msf.second = buffer[10];
		subchnl->cdsc_absaddr.msf.frame = buffer[11];
		subchnl->cdsc_reladdr.msf.minute = buffer[13];
		subchnl->cdsc_reladdr.msf.second = buffer[14];
		subchnl->cdsc_reladdr.msf.frame = buffer[15];
	}
	kmem_free(buffer, 16);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
	    != 0) {
		return (EFAULT);
	}
	return (rval);
}
27793 27791
27794 27792
27795 27793 /*
27796 27794 * Function: sr_read_tocentry()
27797 27795 *
27798 27796 * Description: This routine is the driver entry point for handling CD-ROM
27799 27797 * ioctl requests to read from the Table of Contents (TOC)
27800 27798 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
27801 27799 * fields, the starting address (LBA or MSF format per the user)
27802 27800 * and the data mode if the user specified track is a data track.
27803 27801 *
27804 27802 * Note: The READ HEADER (0x44) command used in this routine is
27805 27803 * obsolete per the SCSI MMC spec but still supported in the
 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI
27807 27805 * therefore the command is still implemented in this routine.
27808 27806 *
27809 27807 * Arguments: dev - the device 'dev_t'
27810 27808 * data - pointer to user provided toc entry structure,
27811 27809 * specifying the track # and the address format
27812 27810 * (LBA or MSF).
27813 27811 * flag - this argument is a pass through to ddi_copyxxx()
27814 27812 * directly from the mode argument of ioctl().
27815 27813 *
27816 27814 * Return Code: the code returned by sd_send_scsi_cmd()
27817 27815 * EFAULT if ddi_copyxxx() fails
27818 27816 * ENXIO if fail ddi_get_soft_state
27819 27817 * EINVAL if data pointer is NULL
27820 27818 */
27821 27819
static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	/* Track 0 is not a valid track number */
	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	/* 12-byte buffer: 4-byte TOC header + 8-byte track descriptor */
	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	/* Some drives expect the starting track number in BCD */
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		/* LBA format: the address is in bytes 8-11, big-endian */
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		/* MSF format with the drive reporting fields in BCD */
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		/* MSF format with binary (non-BCD) fields */
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		/* Bytes 2-5: LBA of the track, from the re-issued READ TOC */
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * READ HEADER command failed, since this is
			 * obsoleted in one spec, its better to return
			 * -1 for an invalid track so that we can still
			 * receive the rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}
27975 27973
27976 27974
27977 27975 /*
27978 27976 * Function: sr_read_tochdr()
27979 27977 *
27980 27978 * Description: This routine is the driver entry point for handling CD-ROM
27981 27979 * ioctl requests to read the Table of Contents (TOC) header
 * (CDROMREADTOCHDR). The TOC header consists of the disk starting
27983 27981 * and ending track numbers
27984 27982 *
27985 27983 * Arguments: dev - the device 'dev_t'
27986 27984 * data - pointer to user provided toc header structure,
27987 27985 * specifying the starting and ending track numbers.
27988 27986 * flag - this argument is a pass through to ddi_copyxxx()
27989 27987 * directly from the mode argument of ioctl().
27990 27988 *
27991 27989 * Return Code: the code returned by sd_send_scsi_cmd()
27992 27990 * EFAULT if ddi_copyxxx() fails
27993 27991 * ENXIO if fail ddi_get_soft_state
27994 27992 * EINVAL if data pointer is NULL
27995 27993 */
27996 27994
27997 27995 static int
27998 27996 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
27999 27997 {
28000 27998 struct sd_lun *un;
28001 27999 struct uscsi_cmd *com;
28002 28000 struct cdrom_tochdr toc_header;
28003 28001 struct cdrom_tochdr *hdr = &toc_header;
28004 28002 char cdb[CDB_GROUP1];
28005 28003 int rval;
28006 28004 caddr_t buffer;
28007 28005
28008 28006 if (data == NULL) {
28009 28007 return (EINVAL);
28010 28008 }
28011 28009
28012 28010 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28013 28011 (un->un_state == SD_STATE_OFFLINE)) {
28014 28012 return (ENXIO);
28015 28013 }
28016 28014
28017 28015 buffer = kmem_zalloc(4, KM_SLEEP);
28018 28016 bzero(cdb, CDB_GROUP1);
28019 28017 cdb[0] = SCMD_READ_TOC;
28020 28018 /*
28021 28019 * Specifying a track number of 0x00 in the READ TOC command indicates
28022 28020 * that the TOC header should be returned
28023 28021 */
28024 28022 cdb[6] = 0x00;
28025 28023 /*
28026 28024 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
28027 28025 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
28028 28026 */
28029 28027 cdb[8] = 0x04;
28030 28028 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28031 28029 com->uscsi_cdb = cdb;
28032 28030 com->uscsi_cdblen = CDB_GROUP1;
28033 28031 com->uscsi_bufaddr = buffer;
28034 28032 com->uscsi_buflen = 0x04;
28035 28033 com->uscsi_timeout = 300;
28036 28034 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28037 28035
28038 28036 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
28039 28037 SD_PATH_STANDARD);
28040 28038 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
28041 28039 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
28042 28040 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
28043 28041 } else {
28044 28042 hdr->cdth_trk0 = buffer[2];
28045 28043 hdr->cdth_trk1 = buffer[3];
28046 28044 }
28047 28045 kmem_free(buffer, 4);
28048 28046 kmem_free(com, sizeof (*com));
28049 28047 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
28050 28048 return (EFAULT);
28051 28049 }
28052 28050 return (rval);
28053 28051 }
28054 28052
28055 28053
28056 28054 /*
28057 28055 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
28058 28056 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for
28059 28057 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
28060 28058 * digital audio and extended architecture digital audio. These modes are
28061 28059 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
28062 28060 * MMC specs.
28063 28061 *
28064 28062 * In addition to support for the various data formats these routines also
28065 28063 * include support for devices that implement only the direct access READ
28066 28064 * commands (0x08, 0x28), devices that implement the READ_CD commands
28067 28065 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
28068 28066 * READ CDXA commands (0xD8, 0xDB)
28069 28067 */
28070 28068
28071 28069 /*
28072 28070 * Function: sr_read_mode1()
28073 28071 *
28074 28072 * Description: This routine is the driver entry point for handling CD-ROM
28075 28073 * ioctl read mode1 requests (CDROMREADMODE1).
28076 28074 *
28077 28075 * Arguments: dev - the device 'dev_t'
28078 28076 * data - pointer to user provided cd read structure specifying
28079 28077 * the lba buffer address and length.
28080 28078 * flag - this argument is a pass through to ddi_copyxxx()
28081 28079 * directly from the mode argument of ioctl().
28082 28080 *
28083 28081 * Return Code: the code returned by sd_send_scsi_cmd()
28084 28082 * EFAULT if ddi_copyxxx() fails
28085 28083 * ENXIO if fail ddi_get_soft_state
28086 28084 * EINVAL if data pointer is NULL
28087 28085 */
28088 28086
static int
sr_read_mode1(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_read	mode1_struct;
	struct cdrom_read	*mode1 = &mode1_struct;
	int			rval;
	sd_ssc_t		*ssc;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * NOTE(review): the trace strings below say "sd_read_mode1" (the
	 * function is sr_read_mode1) and use the SD_LOG_ATTACH_DETACH log
	 * class, which looks out of place for an ioctl path — confirm
	 * intent before changing, since these are runtime log strings.
	 */
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode1: entry: un:0x%p\n", un);

#ifdef _MULTI_DATAMODEL
	/* Copy in the user request, converting from ILP32 if necessary */
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode1);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
			return (EFAULT);
		}
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Issue a plain READ for the requested LBA into the user buffer */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr,
	    mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode1: exit: un:0x%p\n", un);

	return (rval);
}
28146 28144
28147 28145
28148 28146 /*
28149 28147 * Function: sr_read_cd_mode2()
28150 28148 *
28151 28149 * Description: This routine is the driver entry point for handling CD-ROM
28152 28150 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28153 28151 * support the READ CD (0xBE) command or the 1st generation
28154 28152 * READ CD (0xD4) command.
28155 28153 *
28156 28154 * Arguments: dev - the device 'dev_t'
28157 28155 * data - pointer to user provided cd read structure specifying
28158 28156 * the lba buffer address and length.
28159 28157 * flag - this argument is a pass through to ddi_copyxxx()
28160 28158 * directly from the mode argument of ioctl().
28161 28159 *
28162 28160 * Return Code: the code returned by sd_send_scsi_cmd()
28163 28161 * EFAULT if ddi_copyxxx() fails
28164 28162 * ENXIO if fail ddi_get_soft_state
28165 28163 * EINVAL if data pointer is NULL
28166 28164 */
28167 28165
static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/* Copy in the user request, converting from ILP32 if required */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address (big-endian LBA in bytes 2-5) */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/*
	 * Set the transfer length.  The length is expressed in whole
	 * 2336-byte mode 2 sectors; any partial trailing sector in
	 * cdread_buflen is ignored.
	 */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits (return user data only) */
	cdb[9] = CDROM_READ_CD_USERDATA;

	/* Issue the command with the caller's user-space read buffer */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}
28256 28254
28257 28255
28258 28256 /*
28259 28257 * Function: sr_read_mode2()
28260 28258 *
28261 28259 * Description: This routine is the driver entry point for handling CD-ROM
28262 28260 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28263 28261 * do not support the READ CD (0xBE) command.
28264 28262 *
28265 28263 * Arguments: dev - the device 'dev_t'
28266 28264 * data - pointer to user provided cd read structure specifying
28267 28265 * the lba buffer address and length.
28268 28266 * flag - this argument is a pass through to ddi_copyxxx()
28269 28267 * directly from the mode argument of ioctl().
28270 28268 *
28271 28269 * Return Code: the code returned by sd_send_scsi_cmd()
28272 28270 * EFAULT if ddi_copyxxx() fails
28273 28271 * ENXIO if fail ddi_get_soft_state
28274 28272 * EINVAL if data pointer is NULL
28275 28273 * EIO if fail to reset block size
28276 28274 * EAGAIN if commands are in progress in the driver
28277 28275 */
28278 28276
static int
sr_read_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	int			rval;
	uint32_t		restore_blksize;
	struct uscsi_cmd	*com;
	uchar_t			cdb[CDB_GROUP0];
	int			nblocks;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * Because this routine will update the device and driver block size
	 * being used we want to make sure there are no commands in progress.
	 * If commands are in progress the user will have to try again.
	 *
	 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
	 * in sdioctl to protect commands from sdioctl through to the top of
	 * sd_uscsi_strategy. See sdioctl for details.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_ncmds_in_driver != 1) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: entry: un:0x%p\n", un);

	/* Copy in the user request, converting from ILP32 if required */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}


	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/*
	 * set the start address; the Group 0 READ CDB only carries a
	 * 21-bit LBA, hence the 0x1F mask on the high byte
	 */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length (Group 0: single-byte block count) */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't do switch back to mode 1\n");
		/*
		 * If sd_send_scsi_READ succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}
28414 28412
28415 28413
28416 28414 /*
28417 28415 * Function: sr_sector_mode()
28418 28416 *
28419 28417 * Description: This utility function is used by sr_read_mode2 to set the target
28420 28418 * block size based on the user specified size. This is a legacy
28421 28419 * implementation based upon a vendor specific mode page
28422 28420 *
 * Arguments: dev     - the device 'dev_t'
 *            blksize - the block size to be set, either 2336
 *                      (SD_MODE2_BLKSIZE) or 512.
 *
 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
 *              sd_send_scsi_MODE_SELECT()
 *              ENXIO if fail ddi_get_soft_state
 */
28432 28430
static int
sr_sector_mode(dev_t dev, uint32_t blksize)
{
	struct sd_lun	*un;
	uchar_t		*sense;
	uchar_t		*select;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	sense = kmem_zalloc(20, KM_SLEEP);

	/* Note: This is a vendor specific mode page (0x81) */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Sense failed\n");
		kmem_free(sense, 20);
		return (rval);
	}
	/*
	 * Build the MODE SELECT parameter list: a mode parameter header
	 * with an 8-byte block descriptor carrying the new block size,
	 * followed by the vendor specific page.
	 *
	 * NOTE(review): the meaning of the vendor page bytes (select[12]
	 * onward) is taken from the legacy implementation; bit 0 of
	 * select[14] apparently selects the 2336-byte mode 2 format --
	 * confirm against the vendor documentation.
	 */
	select = kmem_zalloc(20, KM_SLEEP);
	select[3] = 0x08;	/* header: block descriptor length = 8 */
	/* low two bytes of the block descriptor's block length field */
	select[10] = ((blksize >> 8) & 0xff);
	select[11] = (blksize & 0xff);
	select[12] = 0x01;
	select[13] = 0x06;
	/* preserve the device's current vendor page settings */
	select[14] = sense[14];
	select[15] = sense[15];
	if (blksize == SD_MODE2_BLKSIZE) {
		select[14] |= 0x01;
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
	    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Select failed\n");
	} else {
		/*
		 * Only update the softstate block size if we successfully
		 * changed the device block mode.
		 */
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, blksize, 0);
		mutex_exit(SD_MUTEX(un));
	}
	kmem_free(sense, 20);
	kmem_free(select, 20);
	return (rval);
}
28492 28490
28493 28491
28494 28492 /*
28495 28493 * Function: sr_read_cdda()
28496 28494 *
28497 28495 * Description: This routine is the driver entry point for handling CD-ROM
28498 28496 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If
28499 28497 * the target supports CDDA these requests are handled via a vendor
28500 28498 * specific command (0xD8) If the target does not support CDDA
28501 28499 * these requests are handled via the READ CD command (0xBE).
28502 28500 *
28503 28501 * Arguments: dev - the device 'dev_t'
28504 28502 * data - pointer to user provided CD-DA structure specifying
28505 28503 * the track starting address, transfer length, and
28506 28504 * subcode options.
28507 28505 * flag - this argument is a pass through to ddi_copyxxx()
28508 28506 * directly from the mode argument of ioctl().
28509 28507 *
28510 28508 * Return Code: the code returned by sd_send_scsi_cmd()
28511 28509 * EFAULT if ddi_copyxxx() fails
28512 28510 * ENXIO if fail ddi_get_soft_state
28513 28511 * EINVAL if invalid arguments are provided
28514 28512 * ENOTTY
28515 28513 */
28516 28514
static int
sr_read_cdda(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdda	*cdda;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdda32	cdrom_cdda32;
	struct cdrom_cdda32	*cdda32 = &cdrom_cdda32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);

	/* Copy in the user request, converting from ILP32 if required */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_cdda32tocdrom_cdda(cdda32, cdda);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: ddi_copyin Failed\n");
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdda->cdda_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdda->cdda_length, 0xFFFFFF);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	/*
	 * Size the transfer: the per-block size depends on how much
	 * subcode data was requested along with the audio data.
	 */
	switch (cdda->cdda_subcode) {
	case CDROM_DA_NO_SUBCODE:
		buflen = CDROM_BLK_2352 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBQ:
		buflen = CDROM_BLK_2368 * cdda->cdda_length;
		break;
	case CDROM_DA_ALL_SUBCODE:
		buflen = CDROM_BLK_2448 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBCODE_ONLY:
		buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: Subcode '0x%x' Not Supported\n",
		    cdda->cdda_subcode);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	/* Build and send the command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_cfg_cdda == TRUE) {
		/*
		 * MMC READ CD (0xBE): expected sector type CD-DA (0x04),
		 * user data only (cdb[9] = 0x10), with the sub-channel
		 * selection code in cdb[10].
		 */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[1] = 0x04;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdda->cdda_length) & 0x000000ff);
		cdb[9] = 0x10;
		switch (cdda->cdda_subcode) {
		case CDROM_DA_NO_SUBCODE :
			cdb[10] = 0x0;
			break;
		case CDROM_DA_SUBQ :
			cdb[10] = 0x2;
			break;
		case CDROM_DA_ALL_SUBCODE :
			cdb[10] = 0x1;
			break;
		case CDROM_DA_SUBCODE_ONLY :
			/* FALLTHROUGH */
		default :
			/* subcode-only transfers not supported via READ CD */
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			kmem_free(com, sizeof (*com));
			return (ENOTTY);
		}
	} else {
		/*
		 * Vendor unique READ CDDA (0xD8): takes a 4-byte transfer
		 * length and passes the subcode selector through directly.
		 */
		cdb[0] = (char)SCMD_READ_CDDA;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
		cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdda->cdda_length) & 0x000000ff);
		cdb[10] = cdda->cdda_subcode;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(cdda, sizeof (struct cdrom_cdda));
	kmem_free(com, sizeof (*com));
	return (rval);
}
28664 28662
28665 28663
28666 28664 /*
28667 28665 * Function: sr_read_cdxa()
28668 28666 *
28669 28667 * Description: This routine is the driver entry point for handling CD-ROM
28670 28668 * ioctl requests to return CD-XA (Extended Architecture) data.
28671 28669 * (CDROMCDXA).
28672 28670 *
28673 28671 * Arguments: dev - the device 'dev_t'
28674 28672 * data - pointer to user provided CD-XA structure specifying
28675 28673 * the data starting address, transfer length, and format
28676 28674 * flag - this argument is a pass through to ddi_copyxxx()
28677 28675 * directly from the mode argument of ioctl().
28678 28676 *
28679 28677 * Return Code: the code returned by sd_send_scsi_cmd()
28680 28678 * EFAULT if ddi_copyxxx() fails
28681 28679 * ENXIO if fail ddi_get_soft_state
28682 28680 * EINVAL if data pointer is NULL
28683 28681 */
28684 28682
static int
sr_read_cdxa(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdxa	*cdxa;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];
	uchar_t			read_flags;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdxa32	cdrom_cdxa32;
	struct cdrom_cdxa32	*cdxa32 = &cdrom_cdxa32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);

	/* Copy in the user request, converting from ILP32 if required */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		/*
		 * Convert the ILP32 uscsi data from the
		 * application to LP64 for internal use.
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	/*
	 * Size the transfer for the requested format and select the
	 * READ CD flags (placed in CDB byte 9 below) that request the
	 * matching portions of each sector.
	 */
	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;	/* user data only */
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;	/* full 2352-byte raw sector */
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;	/* raw sector plus error info */
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		/* MMC devices: use READ CD (0xBE) with a 3-byte length */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here
		 * to request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_CDXA;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
		cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[10] = cdxa->cdxa_format;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(cdxa, sizeof (struct cdrom_cdxa));
	kmem_free(com, sizeof (*com));
	return (rval);
}
28811 28809
28812 28810
28813 28811 /*
28814 28812 * Function: sr_eject()
28815 28813 *
28816 28814 * Description: This routine is the driver entry point for handling CD-ROM
28817 28815 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
28818 28816 *
28819 28817 * Arguments: dev - the device 'dev_t'
28820 28818 *
28821 28819 * Return Code: the code returned by sd_send_scsi_cmd()
28822 28820 */
28823 28821
28824 28822 static int
28825 28823 sr_eject(dev_t dev)
28826 28824 {
28827 28825 struct sd_lun *un;
28828 28826 int rval;
28829 28827 sd_ssc_t *ssc;
28830 28828
28831 28829 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28832 28830 (un->un_state == SD_STATE_OFFLINE)) {
28833 28831 return (ENXIO);
28834 28832 }
28835 28833
28836 28834 /*
28837 28835 * To prevent race conditions with the eject
28838 28836 * command, keep track of an eject command as
28839 28837 * it progresses. If we are already handling
28840 28838 * an eject command in the driver for the given
28841 28839 * unit and another request to eject is received
28842 28840 * immediately return EAGAIN so we don't lose
28843 28841 * the command if the current eject command fails.
28844 28842 */
28845 28843 mutex_enter(SD_MUTEX(un));
28846 28844 if (un->un_f_ejecting == TRUE) {
28847 28845 mutex_exit(SD_MUTEX(un));
28848 28846 return (EAGAIN);
28849 28847 }
28850 28848 un->un_f_ejecting = TRUE;
28851 28849 mutex_exit(SD_MUTEX(un));
28852 28850
28853 28851 ssc = sd_ssc_init(un);
28854 28852 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
28855 28853 SD_PATH_STANDARD);
28856 28854 sd_ssc_fini(ssc);
28857 28855
28858 28856 if (rval != 0) {
28859 28857 mutex_enter(SD_MUTEX(un));
28860 28858 un->un_f_ejecting = FALSE;
28861 28859 mutex_exit(SD_MUTEX(un));
28862 28860 return (rval);
28863 28861 }
28864 28862
28865 28863 ssc = sd_ssc_init(un);
28866 28864 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
28867 28865 SD_TARGET_EJECT, SD_PATH_STANDARD);
28868 28866 sd_ssc_fini(ssc);
28869 28867
28870 28868 if (rval == 0) {
28871 28869 mutex_enter(SD_MUTEX(un));
28872 28870 sr_ejected(un);
28873 28871 un->un_mediastate = DKIO_EJECTED;
28874 28872 un->un_f_ejecting = FALSE;
28875 28873 cv_broadcast(&un->un_state_cv);
28876 28874 mutex_exit(SD_MUTEX(un));
28877 28875 } else {
28878 28876 mutex_enter(SD_MUTEX(un));
28879 28877 un->un_f_ejecting = FALSE;
28880 28878 mutex_exit(SD_MUTEX(un));
28881 28879 }
28882 28880 return (rval);
28883 28881 }
28884 28882
28885 28883
28886 28884 /*
28887 28885 * Function: sr_ejected()
28888 28886 *
28889 28887 * Description: This routine updates the soft state structure to invalidate the
28890 28888 * geometry information after the media has been ejected or a
28891 28889 * media eject has been detected.
28892 28890 *
28893 28891 * Arguments: un - driver soft state (unit) structure
28894 28892 */
28895 28893
28896 28894 static void
28897 28895 sr_ejected(struct sd_lun *un)
28898 28896 {
28899 28897 struct sd_errstats *stp;
28900 28898
28901 28899 ASSERT(un != NULL);
28902 28900 ASSERT(mutex_owned(SD_MUTEX(un)));
28903 28901
28904 28902 un->un_f_blockcount_is_valid = FALSE;
28905 28903 un->un_f_tgt_blocksize_is_valid = FALSE;
28906 28904 mutex_exit(SD_MUTEX(un));
28907 28905 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
28908 28906 mutex_enter(SD_MUTEX(un));
28909 28907
28910 28908 if (un->un_errstats != NULL) {
28911 28909 stp = (struct sd_errstats *)un->un_errstats->ks_data;
28912 28910 stp->sd_capacity.value.ui64 = 0;
28913 28911 }
28914 28912 }
28915 28913
28916 28914
28917 28915 /*
28918 28916 * Function: sr_check_wp()
28919 28917 *
28920 28918 * Description: This routine checks the write protection of a removable
28921 28919 * media disk and hotpluggable devices via the write protect bit of
28922 28920 * the Mode Page Header device specific field. Some devices choke
28923 28921 * on unsupported mode page. In order to workaround this issue,
28924 28922 * this routine has been implemented to use 0x3f mode page(request
28925 28923 * for all pages) for all device types.
28926 28924 *
28927 28925 * Arguments: dev - the device 'dev_t'
28928 28926 *
28929 28927 * Return Code: int indicating if the device is write protected (1) or not (0)
28930 28928 *
28931 28929 * Context: Kernel thread.
28932 28930 *
28933 28931 */
28934 28932
28935 28933 static int
28936 28934 sr_check_wp(dev_t dev)
28937 28935 {
28938 28936 struct sd_lun *un;
28939 28937 uchar_t device_specific;
28940 28938 uchar_t *sense;
28941 28939 int hdrlen;
28942 28940 int rval = FALSE;
28943 28941 int status;
28944 28942 sd_ssc_t *ssc;
28945 28943
28946 28944 /*
28947 28945 * Note: The return codes for this routine should be reworked to
28948 28946 * properly handle the case of a NULL softstate.
28949 28947 */
28950 28948 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28951 28949 return (FALSE);
28952 28950 }
28953 28951
28954 28952 if (un->un_f_cfg_is_atapi == TRUE) {
28955 28953 /*
28956 28954 * The mode page contents are not required; set the allocation
28957 28955 * length for the mode page header only
28958 28956 */
28959 28957 hdrlen = MODE_HEADER_LENGTH_GRP2;
28960 28958 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28961 28959 ssc = sd_ssc_init(un);
28962 28960 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
28963 28961 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28964 28962 sd_ssc_fini(ssc);
28965 28963 if (status != 0)
28966 28964 goto err_exit;
28967 28965 device_specific =
28968 28966 ((struct mode_header_grp2 *)sense)->device_specific;
28969 28967 } else {
28970 28968 hdrlen = MODE_HEADER_LENGTH;
28971 28969 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28972 28970 ssc = sd_ssc_init(un);
28973 28971 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
28974 28972 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28975 28973 sd_ssc_fini(ssc);
28976 28974 if (status != 0)
28977 28975 goto err_exit;
28978 28976 device_specific =
28979 28977 ((struct mode_header *)sense)->device_specific;
28980 28978 }
28981 28979
28982 28980
28983 28981 /*
28984 28982 * Write protect mode sense failed; not all disks
28985 28983 * understand this query. Return FALSE assuming that
28986 28984 * these devices are not writable.
28987 28985 */
28988 28986 if (device_specific & WRITE_PROTECT) {
28989 28987 rval = TRUE;
28990 28988 }
28991 28989
28992 28990 err_exit:
28993 28991 kmem_free(sense, hdrlen);
28994 28992 return (rval);
28995 28993 }
28996 28994
28997 28995 /*
28998 28996 * Function: sr_volume_ctrl()
28999 28997 *
29000 28998 * Description: This routine is the driver entry point for handling CD-ROM
29001 28999 * audio output volume ioctl requests. (CDROMVOLCTRL)
29002 29000 *
29003 29001 * Arguments: dev - the device 'dev_t'
29004 29002 * data - pointer to user audio volume control structure
29005 29003 * flag - this argument is a pass through to ddi_copyxxx()
29006 29004 * directly from the mode argument of ioctl().
29007 29005 *
29008 29006 * Return Code: the code returned by sd_send_scsi_cmd()
29009 29007 * EFAULT if ddi_copyxxx() fails
29010 29008 * ENXIO if fail ddi_get_soft_state
29011 29009 * EINVAL if data pointer is NULL
29012 29010 *
29013 29011 */
29014 29012
29015 29013 static int
29016 29014 sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
29017 29015 {
29018 29016 struct sd_lun *un;
29019 29017 struct cdrom_volctrl volume;
29020 29018 struct cdrom_volctrl *vol = &volume;
29021 29019 uchar_t *sense_page;
29022 29020 uchar_t *select_page;
29023 29021 uchar_t *sense;
29024 29022 uchar_t *select;
29025 29023 int sense_buflen;
29026 29024 int select_buflen;
29027 29025 int rval;
29028 29026 sd_ssc_t *ssc;
29029 29027
29030 29028 if (data == NULL) {
29031 29029 return (EINVAL);
29032 29030 }
29033 29031
29034 29032 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
29035 29033 (un->un_state == SD_STATE_OFFLINE)) {
29036 29034 return (ENXIO);
29037 29035 }
29038 29036
29039 29037 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
29040 29038 return (EFAULT);
29041 29039 }
29042 29040
29043 29041 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29044 29042 struct mode_header_grp2 *sense_mhp;
29045 29043 struct mode_header_grp2 *select_mhp;
29046 29044 int bd_len;
29047 29045
29048 29046 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
29049 29047 select_buflen = MODE_HEADER_LENGTH_GRP2 +
29050 29048 MODEPAGE_AUDIO_CTRL_LEN;
29051 29049 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29052 29050 select = kmem_zalloc(select_buflen, KM_SLEEP);
29053 29051 ssc = sd_ssc_init(un);
29054 29052 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
29055 29053 sense_buflen, MODEPAGE_AUDIO_CTRL,
29056 29054 SD_PATH_STANDARD);
29057 29055 sd_ssc_fini(ssc);
29058 29056
29059 29057 if (rval != 0) {
29060 29058 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
29061 29059 "sr_volume_ctrl: Mode Sense Failed\n");
29062 29060 kmem_free(sense, sense_buflen);
29063 29061 kmem_free(select, select_buflen);
29064 29062 return (rval);
29065 29063 }
29066 29064 sense_mhp = (struct mode_header_grp2 *)sense;
29067 29065 select_mhp = (struct mode_header_grp2 *)select;
29068 29066 bd_len = (sense_mhp->bdesc_length_hi << 8) |
29069 29067 sense_mhp->bdesc_length_lo;
29070 29068 if (bd_len > MODE_BLK_DESC_LENGTH) {
29071 29069 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29072 29070 "sr_volume_ctrl: Mode Sense returned invalid "
29073 29071 "block descriptor length\n");
29074 29072 kmem_free(sense, sense_buflen);
29075 29073 kmem_free(select, select_buflen);
29076 29074 return (EIO);
29077 29075 }
29078 29076 sense_page = (uchar_t *)
29079 29077 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
29080 29078 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
29081 29079 select_mhp->length_msb = 0;
29082 29080 select_mhp->length_lsb = 0;
29083 29081 select_mhp->bdesc_length_hi = 0;
29084 29082 select_mhp->bdesc_length_lo = 0;
29085 29083 } else {
29086 29084 struct mode_header *sense_mhp, *select_mhp;
29087 29085
29088 29086 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29089 29087 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29090 29088 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29091 29089 select = kmem_zalloc(select_buflen, KM_SLEEP);
29092 29090 ssc = sd_ssc_init(un);
29093 29091 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
29094 29092 sense_buflen, MODEPAGE_AUDIO_CTRL,
29095 29093 SD_PATH_STANDARD);
29096 29094 sd_ssc_fini(ssc);
29097 29095
29098 29096 if (rval != 0) {
29099 29097 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29100 29098 "sr_volume_ctrl: Mode Sense Failed\n");
29101 29099 kmem_free(sense, sense_buflen);
29102 29100 kmem_free(select, select_buflen);
29103 29101 return (rval);
29104 29102 }
29105 29103 sense_mhp = (struct mode_header *)sense;
29106 29104 select_mhp = (struct mode_header *)select;
29107 29105 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
29108 29106 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29109 29107 "sr_volume_ctrl: Mode Sense returned invalid "
29110 29108 "block descriptor length\n");
29111 29109 kmem_free(sense, sense_buflen);
29112 29110 kmem_free(select, select_buflen);
29113 29111 return (EIO);
29114 29112 }
29115 29113 sense_page = (uchar_t *)
29116 29114 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
29117 29115 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
29118 29116 select_mhp->length = 0;
29119 29117 select_mhp->bdesc_length = 0;
29120 29118 }
29121 29119 /*
29122 29120 * Note: An audio control data structure could be created and overlayed
29123 29121 * on the following in place of the array indexing method implemented.
29124 29122 */
29125 29123
29126 29124 /* Build the select data for the user volume data */
29127 29125 select_page[0] = MODEPAGE_AUDIO_CTRL;
29128 29126 select_page[1] = 0xE;
29129 29127 /* Set the immediate bit */
29130 29128 select_page[2] = 0x04;
29131 29129 /* Zero out reserved fields */
29132 29130 select_page[3] = 0x00;
29133 29131 select_page[4] = 0x00;
29134 29132 /* Return sense data for fields not to be modified */
29135 29133 select_page[5] = sense_page[5];
29136 29134 select_page[6] = sense_page[6];
29137 29135 select_page[7] = sense_page[7];
29138 29136 /* Set the user specified volume levels for channel 0 and 1 */
29139 29137 select_page[8] = 0x01;
29140 29138 select_page[9] = vol->channel0;
29141 29139 select_page[10] = 0x02;
29142 29140 select_page[11] = vol->channel1;
29143 29141 /* Channel 2 and 3 are currently unsupported so return the sense data */
29144 29142 select_page[12] = sense_page[12];
29145 29143 select_page[13] = sense_page[13];
29146 29144 select_page[14] = sense_page[14];
29147 29145 select_page[15] = sense_page[15];
29148 29146
29149 29147 ssc = sd_ssc_init(un);
29150 29148 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29151 29149 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
29152 29150 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29153 29151 } else {
29154 29152 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
29155 29153 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29156 29154 }
29157 29155 sd_ssc_fini(ssc);
29158 29156
29159 29157 kmem_free(sense, sense_buflen);
29160 29158 kmem_free(select, select_buflen);
29161 29159 return (rval);
29162 29160 }
29163 29161
29164 29162
29165 29163 /*
29166 29164 * Function: sr_read_sony_session_offset()
29167 29165 *
29168 29166 * Description: This routine is the driver entry point for handling CD-ROM
29169 29167 * ioctl requests for session offset information. (CDROMREADOFFSET)
29170 29168 * The address of the first track in the last session of a
29171 29169 * multi-session CD-ROM is returned
29172 29170 *
29173 29171 * Note: This routine uses a vendor specific key value in the
29174 29172 * command control field without implementing any vendor check here
29175 29173 * or in the ioctl routine.
29176 29174 *
29177 29175 * Arguments: dev - the device 'dev_t'
29178 29176 * data - pointer to an int to hold the requested address
29179 29177 * flag - this argument is a pass through to ddi_copyxxx()
29180 29178 * directly from the mode argument of ioctl().
29181 29179 *
29182 29180 * Return Code: the code returned by sd_send_scsi_cmd()
29183 29181 * EFAULT if ddi_copyxxx() fails
29184 29182 * ENXIO if fail ddi_get_soft_state
29185 29183 * EINVAL if data pointer is NULL
29186 29184 */
29187 29185
29188 29186 static int
29189 29187 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
29190 29188 {
29191 29189 struct sd_lun *un;
29192 29190 struct uscsi_cmd *com;
29193 29191 caddr_t buffer;
29194 29192 char cdb[CDB_GROUP1];
29195 29193 int session_offset = 0;
29196 29194 int rval;
29197 29195
29198 29196 if (data == NULL) {
29199 29197 return (EINVAL);
29200 29198 }
29201 29199
29202 29200 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
29203 29201 (un->un_state == SD_STATE_OFFLINE)) {
29204 29202 return (ENXIO);
29205 29203 }
29206 29204
29207 29205 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
29208 29206 bzero(cdb, CDB_GROUP1);
29209 29207 cdb[0] = SCMD_READ_TOC;
29210 29208 /*
29211 29209 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
29212 29210 * (4 byte TOC response header + 8 byte response data)
29213 29211 */
29214 29212 cdb[8] = SONY_SESSION_OFFSET_LEN;
29215 29213 /* Byte 9 is the control byte. A vendor specific value is used */
29216 29214 cdb[9] = SONY_SESSION_OFFSET_KEY;
29217 29215 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
29218 29216 com->uscsi_cdb = cdb;
29219 29217 com->uscsi_cdblen = CDB_GROUP1;
29220 29218 com->uscsi_bufaddr = buffer;
29221 29219 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
29222 29220 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
29223 29221
29224 29222 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
29225 29223 SD_PATH_STANDARD);
29226 29224 if (rval != 0) {
29227 29225 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29228 29226 kmem_free(com, sizeof (*com));
29229 29227 return (rval);
29230 29228 }
29231 29229 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
29232 29230 session_offset =
29233 29231 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
29234 29232 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
29235 29233 /*
29236 29234 * Offset returned offset in current lbasize block's. Convert to
29237 29235 * 2k block's to return to the user
29238 29236 */
29239 29237 if (un->un_tgt_blocksize == CDROM_BLK_512) {
29240 29238 session_offset >>= 2;
29241 29239 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
29242 29240 session_offset >>= 1;
29243 29241 }
29244 29242 }
29245 29243
29246 29244 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
29247 29245 rval = EFAULT;
29248 29246 }
29249 29247
29250 29248 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29251 29249 kmem_free(com, sizeof (*com));
29252 29250 return (rval);
29253 29251 }
29254 29252
29255 29253
29256 29254 /*
29257 29255 * Function: sd_wm_cache_constructor()
29258 29256 *
29259 29257 * Description: Cache Constructor for the wmap cache for the read/modify/write
29260 29258 * devices.
29261 29259 *
29262 29260 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29263 29261 * un - sd_lun structure for the device.
29264 29262 * flag - the km flags passed to constructor
29265 29263 *
29266 29264 * Return Code: 0 on success.
29267 29265 * -1 on failure.
29268 29266 */
29269 29267
29270 29268 /*ARGSUSED*/
29271 29269 static int
29272 29270 sd_wm_cache_constructor(void *wm, void *un, int flags)
29273 29271 {
29274 29272 bzero(wm, sizeof (struct sd_w_map));
29275 29273 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
29276 29274 return (0);
29277 29275 }
29278 29276
29279 29277
29280 29278 /*
29281 29279 * Function: sd_wm_cache_destructor()
29282 29280 *
29283 29281 * Description: Cache destructor for the wmap cache for the read/modify/write
29284 29282 * devices.
29285 29283 *
29286 29284 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29287 29285 * un - sd_lun structure for the device.
29288 29286 */
29289 29287 /*ARGSUSED*/
29290 29288 static void
29291 29289 sd_wm_cache_destructor(void *wm, void *un)
29292 29290 {
29293 29291 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
29294 29292 }
29295 29293
29296 29294
29297 29295 /*
29298 29296 * Function: sd_range_lock()
29299 29297 *
29300 29298 * Description: Lock the range of blocks specified as parameter to ensure
29301 29299 * that read, modify write is atomic and no other i/o writes
29302 29300 * to the same location. The range is specified in terms
29303 29301 * of start and end blocks. Block numbers are the actual
29304 29302 * media block numbers and not system.
29305 29303 *
29306 29304 * Arguments: un - sd_lun structure for the device.
29307 29305 * startb - The starting block number
29308 29306 * endb - The end block number
29309 29307 * typ - type of i/o - simple/read_modify_write
29310 29308 *
29311 29309 * Return Code: wm - pointer to the wmap structure.
29312 29310 *
29313 29311 * Context: This routine can sleep.
29314 29312 */
29315 29313
29316 29314 static struct sd_w_map *
29317 29315 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
29318 29316 {
29319 29317 struct sd_w_map *wmp = NULL;
29320 29318 struct sd_w_map *sl_wmp = NULL;
29321 29319 struct sd_w_map *tmp_wmp;
29322 29320 wm_state state = SD_WM_CHK_LIST;
29323 29321
29324 29322
29325 29323 ASSERT(un != NULL);
29326 29324 ASSERT(!mutex_owned(SD_MUTEX(un)));
29327 29325
29328 29326 mutex_enter(SD_MUTEX(un));
29329 29327
29330 29328 while (state != SD_WM_DONE) {
29331 29329
29332 29330 switch (state) {
29333 29331 case SD_WM_CHK_LIST:
29334 29332 /*
29335 29333 * This is the starting state. Check the wmap list
29336 29334 * to see if the range is currently available.
29337 29335 */
29338 29336 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
29339 29337 /*
29340 29338 * If this is a simple write and no rmw
29341 29339 * i/o is pending then try to lock the
29342 29340 * range as the range should be available.
29343 29341 */
29344 29342 state = SD_WM_LOCK_RANGE;
29345 29343 } else {
29346 29344 tmp_wmp = sd_get_range(un, startb, endb);
29347 29345 if (tmp_wmp != NULL) {
29348 29346 if ((wmp != NULL) && ONLIST(un, wmp)) {
29349 29347 /*
29350 29348 * Should not keep onlist wmps
29351 29349 * while waiting this macro
29352 29350 * will also do wmp = NULL;
29353 29351 */
29354 29352 FREE_ONLIST_WMAP(un, wmp);
29355 29353 }
29356 29354 /*
29357 29355 * sl_wmp is the wmap on which wait
29358 29356 * is done, since the tmp_wmp points
29359 29357 * to the inuse wmap, set sl_wmp to
29360 29358 * tmp_wmp and change the state to sleep
29361 29359 */
29362 29360 sl_wmp = tmp_wmp;
29363 29361 state = SD_WM_WAIT_MAP;
29364 29362 } else {
29365 29363 state = SD_WM_LOCK_RANGE;
29366 29364 }
29367 29365
29368 29366 }
29369 29367 break;
29370 29368
29371 29369 case SD_WM_LOCK_RANGE:
29372 29370 ASSERT(un->un_wm_cache);
29373 29371 /*
29374 29372 * The range need to be locked, try to get a wmap.
29375 29373 * First attempt it with NO_SLEEP, want to avoid a sleep
29376 29374 * if possible as we will have to release the sd mutex
29377 29375 * if we have to sleep.
29378 29376 */
29379 29377 if (wmp == NULL)
29380 29378 wmp = kmem_cache_alloc(un->un_wm_cache,
29381 29379 KM_NOSLEEP);
29382 29380 if (wmp == NULL) {
29383 29381 mutex_exit(SD_MUTEX(un));
29384 29382 _NOTE(DATA_READABLE_WITHOUT_LOCK
29385 29383 (sd_lun::un_wm_cache))
29386 29384 wmp = kmem_cache_alloc(un->un_wm_cache,
29387 29385 KM_SLEEP);
29388 29386 mutex_enter(SD_MUTEX(un));
29389 29387 /*
29390 29388 * we released the mutex so recheck and go to
29391 29389 * check list state.
29392 29390 */
29393 29391 state = SD_WM_CHK_LIST;
29394 29392 } else {
29395 29393 /*
29396 29394 * We exit out of state machine since we
29397 29395 * have the wmap. Do the housekeeping first.
29398 29396 * place the wmap on the wmap list if it is not
29399 29397 * on it already and then set the state to done.
29400 29398 */
29401 29399 wmp->wm_start = startb;
29402 29400 wmp->wm_end = endb;
29403 29401 wmp->wm_flags = typ | SD_WM_BUSY;
29404 29402 if (typ & SD_WTYPE_RMW) {
29405 29403 un->un_rmw_count++;
29406 29404 }
29407 29405 /*
29408 29406 * If not already on the list then link
29409 29407 */
29410 29408 if (!ONLIST(un, wmp)) {
29411 29409 wmp->wm_next = un->un_wm;
29412 29410 wmp->wm_prev = NULL;
29413 29411 if (wmp->wm_next)
29414 29412 wmp->wm_next->wm_prev = wmp;
29415 29413 un->un_wm = wmp;
29416 29414 }
29417 29415 state = SD_WM_DONE;
29418 29416 }
29419 29417 break;
29420 29418
29421 29419 case SD_WM_WAIT_MAP:
29422 29420 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
29423 29421 /*
29424 29422 * Wait is done on sl_wmp, which is set in the
29425 29423 * check_list state.
29426 29424 */
29427 29425 sl_wmp->wm_wanted_count++;
29428 29426 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
29429 29427 sl_wmp->wm_wanted_count--;
29430 29428 /*
29431 29429 * We can reuse the memory from the completed sl_wmp
29432 29430 * lock range for our new lock, but only if noone is
29433 29431 * waiting for it.
29434 29432 */
29435 29433 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
29436 29434 if (sl_wmp->wm_wanted_count == 0) {
29437 29435 if (wmp != NULL)
29438 29436 CHK_N_FREEWMP(un, wmp);
29439 29437 wmp = sl_wmp;
29440 29438 }
29441 29439 sl_wmp = NULL;
29442 29440 /*
29443 29441 * After waking up, need to recheck for availability of
29444 29442 * range.
29445 29443 */
29446 29444 state = SD_WM_CHK_LIST;
29447 29445 break;
29448 29446
29449 29447 default:
29450 29448 panic("sd_range_lock: "
29451 29449 "Unknown state %d in sd_range_lock", state);
29452 29450 /*NOTREACHED*/
29453 29451 } /* switch(state) */
29454 29452
29455 29453 } /* while(state != SD_WM_DONE) */
29456 29454
29457 29455 mutex_exit(SD_MUTEX(un));
29458 29456
29459 29457 ASSERT(wmp != NULL);
29460 29458
29461 29459 return (wmp);
29462 29460 }
29463 29461
29464 29462
29465 29463 /*
29466 29464 * Function: sd_get_range()
29467 29465 *
29468 29466 * Description: Find if there any overlapping I/O to this one
29469 29467 * Returns the write-map of 1st such I/O, NULL otherwise.
29470 29468 *
29471 29469 * Arguments: un - sd_lun structure for the device.
29472 29470 * startb - The starting block number
29473 29471 * endb - The end block number
29474 29472 *
29475 29473 * Return Code: wm - pointer to the wmap structure.
29476 29474 */
29477 29475
29478 29476 static struct sd_w_map *
29479 29477 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
29480 29478 {
29481 29479 struct sd_w_map *wmp;
29482 29480
29483 29481 ASSERT(un != NULL);
29484 29482
29485 29483 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
29486 29484 if (!(wmp->wm_flags & SD_WM_BUSY)) {
29487 29485 continue;
29488 29486 }
29489 29487 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
29490 29488 break;
29491 29489 }
29492 29490 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
29493 29491 break;
29494 29492 }
29495 29493 }
29496 29494
29497 29495 return (wmp);
29498 29496 }
29499 29497
29500 29498
29501 29499 /*
29502 29500 * Function: sd_free_inlist_wmap()
29503 29501 *
29504 29502 * Description: Unlink and free a write map struct.
29505 29503 *
29506 29504 * Arguments: un - sd_lun structure for the device.
29507 29505 * wmp - sd_w_map which needs to be unlinked.
29508 29506 */
29509 29507
29510 29508 static void
29511 29509 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29512 29510 {
29513 29511 ASSERT(un != NULL);
29514 29512
29515 29513 if (un->un_wm == wmp) {
29516 29514 un->un_wm = wmp->wm_next;
29517 29515 } else {
29518 29516 wmp->wm_prev->wm_next = wmp->wm_next;
29519 29517 }
29520 29518
29521 29519 if (wmp->wm_next) {
29522 29520 wmp->wm_next->wm_prev = wmp->wm_prev;
29523 29521 }
29524 29522
29525 29523 wmp->wm_next = wmp->wm_prev = NULL;
29526 29524
29527 29525 kmem_cache_free(un->un_wm_cache, wmp);
29528 29526 }
29529 29527
29530 29528
29531 29529 /*
29532 29530 * Function: sd_range_unlock()
29533 29531 *
29534 29532 * Description: Unlock the range locked by wm.
29535 29533 * Free write map if nobody else is waiting on it.
29536 29534 *
29537 29535 * Arguments: un - sd_lun structure for the device.
29538 29536 * wmp - sd_w_map which needs to be unlinked.
29539 29537 */
29540 29538
29541 29539 static void
29542 29540 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29543 29541 {
29544 29542 ASSERT(un != NULL);
29545 29543 ASSERT(wm != NULL);
29546 29544 ASSERT(!mutex_owned(SD_MUTEX(un)));
29547 29545
29548 29546 mutex_enter(SD_MUTEX(un));
29549 29547
29550 29548 if (wm->wm_flags & SD_WTYPE_RMW) {
29551 29549 un->un_rmw_count--;
29552 29550 }
29553 29551
29554 29552 if (wm->wm_wanted_count) {
29555 29553 wm->wm_flags = 0;
29556 29554 /*
29557 29555 * Broadcast that the wmap is available now.
29558 29556 */
29559 29557 cv_broadcast(&wm->wm_avail);
29560 29558 } else {
29561 29559 /*
29562 29560 * If no one is waiting on the map, it should be free'ed.
29563 29561 */
29564 29562 sd_free_inlist_wmap(un, wm);
29565 29563 }
29566 29564
29567 29565 mutex_exit(SD_MUTEX(un));
29568 29566 }
29569 29567
29570 29568
29571 29569 /*
29572 29570 * Function: sd_read_modify_write_task
29573 29571 *
29574 29572 * Description: Called from a taskq thread to initiate the write phase of
29575 29573 * a read-modify-write request. This is used for targets where
29576 29574 * un->un_sys_blocksize != un->un_tgt_blocksize.
29577 29575 *
29578 29576 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29579 29577 *
29580 29578 * Context: Called under taskq thread context.
29581 29579 */
29582 29580
29583 29581 static void
29584 29582 sd_read_modify_write_task(void *arg)
29585 29583 {
29586 29584 struct sd_mapblocksize_info *bsp;
29587 29585 struct buf *bp;
29588 29586 struct sd_xbuf *xp;
29589 29587 struct sd_lun *un;
29590 29588
29591 29589 bp = arg; /* The bp is given in arg */
29592 29590 ASSERT(bp != NULL);
29593 29591
29594 29592 /* Get the pointer to the layer-private data struct */
29595 29593 xp = SD_GET_XBUF(bp);
29596 29594 ASSERT(xp != NULL);
29597 29595 bsp = xp->xb_private;
29598 29596 ASSERT(bsp != NULL);
29599 29597
29600 29598 un = SD_GET_UN(bp);
29601 29599 ASSERT(un != NULL);
29602 29600 ASSERT(!mutex_owned(SD_MUTEX(un)));
29603 29601
29604 29602 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29605 29603 "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29606 29604
29607 29605 /*
29608 29606 * This is the write phase of a read-modify-write request, called
29609 29607 * under the context of a taskq thread in response to the completion
29610 29608 * of the read portion of the rmw request completing under interrupt
29611 29609 * context. The write request must be sent from here down the iostart
29612 29610 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29613 29611 * we use the layer index saved in the layer-private data area.
29614 29612 */
29615 29613 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29616 29614
29617 29615 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29618 29616 "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29619 29617 }
29620 29618
29621 29619
29622 29620 /*
29623 29621 * Function: sddump_do_read_of_rmw()
29624 29622 *
29625 29623 * Description: This routine will be called from sddump, If sddump is called
29626 29624 * with an I/O which not aligned on device blocksize boundary
29627 29625 * then the write has to be converted to read-modify-write.
29628 29626 * Do the read part here in order to keep sddump simple.
29629 29627 * Note - That the sd_mutex is held across the call to this
29630 29628 * routine.
29631 29629 *
29632 29630 * Arguments: un - sd_lun
29633 29631 * blkno - block number in terms of media block size.
29634 29632 * nblk - number of blocks.
29635 29633 * bpp - pointer to pointer to the buf structure. On return
29636 29634 * from this function, *bpp points to the valid buffer
29637 29635 * to which the write has to be done.
29638 29636 *
29639 29637 * Return Code: 0 for success or errno-type return code
29640 29638 */
29641 29639
29642 29640 static int
29643 29641 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29644 29642 struct buf **bpp)
29645 29643 {
29646 29644 int err;
29647 29645 int i;
29648 29646 int rval;
29649 29647 struct buf *bp;
29650 29648 struct scsi_pkt *pkt = NULL;
29651 29649 uint32_t target_blocksize;
29652 29650
29653 29651 ASSERT(un != NULL);
29654 29652 ASSERT(mutex_owned(SD_MUTEX(un)));
29655 29653
29656 29654 target_blocksize = un->un_tgt_blocksize;
29657 29655
29658 29656 mutex_exit(SD_MUTEX(un));
29659 29657
29660 29658 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29661 29659 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29662 29660 if (bp == NULL) {
29663 29661 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29664 29662 "no resources for dumping; giving up");
29665 29663 err = ENOMEM;
29666 29664 goto done;
29667 29665 }
29668 29666
29669 29667 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29670 29668 blkno, nblk);
29671 29669 if (rval != 0) {
29672 29670 scsi_free_consistent_buf(bp);
29673 29671 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29674 29672 "no resources for dumping; giving up");
29675 29673 err = ENOMEM;
29676 29674 goto done;
29677 29675 }
29678 29676
29679 29677 pkt->pkt_flags |= FLAG_NOINTR;
29680 29678
29681 29679 err = EIO;
29682 29680 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29683 29681
29684 29682 /*
29685 29683 * Scsi_poll returns 0 (success) if the command completes and
29686 29684 * the status block is STATUS_GOOD. We should only check
29687 29685 * errors if this condition is not true. Even then we should
29688 29686 * send our own request sense packet only if we have a check
29689 29687 * condition and auto request sense has not been performed by
29690 29688 * the hba.
29691 29689 */
29692 29690 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");
29693 29691
29694 29692 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
29695 29693 err = 0;
29696 29694 break;
29697 29695 }
29698 29696
29699 29697 /*
29700 29698 * Check CMD_DEV_GONE 1st, give up if device is gone,
29701 29699 * no need to read RQS data.
29702 29700 */
29703 29701 if (pkt->pkt_reason == CMD_DEV_GONE) {
29704 29702 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29705 29703 "Error while dumping state with rmw..."
29706 29704 "Device is gone\n");
29707 29705 break;
29708 29706 }
29709 29707
29710 29708 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
29711 29709 SD_INFO(SD_LOG_DUMP, un,
29712 29710 "sddump: read failed with CHECK, try # %d\n", i);
29713 29711 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
29714 29712 (void) sd_send_polled_RQS(un);
29715 29713 }
29716 29714
29717 29715 continue;
29718 29716 }
29719 29717
29720 29718 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
29721 29719 int reset_retval = 0;
29722 29720
29723 29721 SD_INFO(SD_LOG_DUMP, un,
29724 29722 "sddump: read failed with BUSY, try # %d\n", i);
29725 29723
29726 29724 if (un->un_f_lun_reset_enabled == TRUE) {
29727 29725 reset_retval = scsi_reset(SD_ADDRESS(un),
29728 29726 RESET_LUN);
29729 29727 }
29730 29728 if (reset_retval == 0) {
29731 29729 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
29732 29730 }
29733 29731 (void) sd_send_polled_RQS(un);
29734 29732
29735 29733 } else {
29736 29734 SD_INFO(SD_LOG_DUMP, un,
29737 29735 "sddump: read failed with 0x%x, try # %d\n",
29738 29736 SD_GET_PKT_STATUS(pkt), i);
29739 29737 mutex_enter(SD_MUTEX(un));
29740 29738 sd_reset_target(un, pkt);
29741 29739 mutex_exit(SD_MUTEX(un));
29742 29740 }
29743 29741
29744 29742 /*
29745 29743 * If we are not getting anywhere with lun/target resets,
29746 29744 * let's reset the bus.
29747 29745 */
29748 29746 if (i > SD_NDUMP_RETRIES/2) {
29749 29747 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
29750 29748 (void) sd_send_polled_RQS(un);
29751 29749 }
29752 29750
29753 29751 }
29754 29752 scsi_destroy_pkt(pkt);
29755 29753
29756 29754 if (err != 0) {
29757 29755 scsi_free_consistent_buf(bp);
29758 29756 *bpp = NULL;
29759 29757 } else {
29760 29758 *bpp = bp;
29761 29759 }
29762 29760
29763 29761 done:
29764 29762 mutex_enter(SD_MUTEX(un));
29765 29763 return (err);
29766 29764 }
29767 29765
29768 29766
/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 *   Arguments: un - pointer to the soft state struct for the instance.
 *
 *     Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	/* Caller must hold the per-instance mutex in the failfast state. */
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 * This is done by splicing the entire waitq onto the tail
		 * of the failfast queue in one operation.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go thru the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 * The waitq is singly linked via av_forw, so a trailing
		 * prev_waitq_bp pointer is maintained for unlinking.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * update kstat since the bp is moved out of
			 * the waitq
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 * Each bp is failed with EIO.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}
29927 29925
29928 29926
29929 29927 /*
29930 29928 * Function: sd_failfast_flushq_callback
29931 29929 *
29932 29930 * Description: Return TRUE if the given bp meets the criteria for failfast
29933 29931 * flushing. Used with ddi_xbuf_flushq(9F).
29934 29932 *
29935 29933 * Arguments: bp - ptr to buf struct to be examined.
29936 29934 *
29937 29935 * Context: Any
29938 29936 */
29939 29937
29940 29938 static int
29941 29939 sd_failfast_flushq_callback(struct buf *bp)
29942 29940 {
29943 29941 /*
29944 29942 * Return TRUE if (1) we want to flush ALL bufs when the failfast
29945 29943 * state is entered; OR (2) the given bp has B_FAILFAST set.
29946 29944 */
29947 29945 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29948 29946 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
29949 29947 }
29950 29948
29951 29949
29952 29950
29953 29951 /*
29954 29952 * Function: sd_setup_next_xfer
29955 29953 *
29956 29954 * Description: Prepare next I/O operation using DMA_PARTIAL
29957 29955 *
29958 29956 */
29959 29957
29960 29958 static int
29961 29959 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
29962 29960 struct scsi_pkt *pkt, struct sd_xbuf *xp)
29963 29961 {
29964 29962 ssize_t num_blks_not_xfered;
29965 29963 daddr_t strt_blk_num;
29966 29964 ssize_t bytes_not_xfered;
29967 29965 int rval;
29968 29966
29969 29967 ASSERT(pkt->pkt_resid == 0);
29970 29968
29971 29969 /*
29972 29970 * Calculate next block number and amount to be transferred.
29973 29971 *
29974 29972 * How much data NOT transfered to the HBA yet.
29975 29973 */
29976 29974 bytes_not_xfered = xp->xb_dma_resid;
29977 29975
29978 29976 /*
29979 29977 * figure how many blocks NOT transfered to the HBA yet.
29980 29978 */
29981 29979 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
29982 29980
29983 29981 /*
29984 29982 * set starting block number to the end of what WAS transfered.
29985 29983 */
29986 29984 strt_blk_num = xp->xb_blkno +
29987 29985 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
29988 29986
29989 29987 /*
29990 29988 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
29991 29989 * will call scsi_initpkt with NULL_FUNC so we do not have to release
29992 29990 * the disk mutex here.
29993 29991 */
29994 29992 rval = sd_setup_next_rw_pkt(un, pkt, bp,
29995 29993 strt_blk_num, num_blks_not_xfered);
29996 29994
29997 29995 if (rval == 0) {
29998 29996
29999 29997 /*
30000 29998 * Success.
30001 29999 *
30002 30000 * Adjust things if there are still more blocks to be
30003 30001 * transfered.
30004 30002 */
30005 30003 xp->xb_dma_resid = pkt->pkt_resid;
30006 30004 pkt->pkt_resid = 0;
30007 30005
30008 30006 return (1);
30009 30007 }
30010 30008
30011 30009 /*
30012 30010 * There's really only one possible return value from
30013 30011 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
30014 30012 * returns NULL.
30015 30013 */
30016 30014 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
30017 30015
30018 30016 bp->b_resid = bp->b_bcount;
30019 30017 bp->b_flags |= B_ERROR;
30020 30018
30021 30019 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
30022 30020 "Error setting up next portion of DMA transfer\n");
30023 30021
30024 30022 return (0);
30025 30023 }
30026 30024
30027 30025 /*
30028 30026 * Function: sd_panic_for_res_conflict
30029 30027 *
30030 30028 * Description: Call panic with a string formatted with "Reservation Conflict"
30031 30029 * and a human readable identifier indicating the SD instance
30032 30030 * that experienced the reservation conflict.
30033 30031 *
30034 30032 * Arguments: un - pointer to the soft state struct for the instance.
30035 30033 *
30036 30034 * Context: may execute in interrupt context.
30037 30035 */
30038 30036
30039 30037 #define SD_RESV_CONFLICT_FMT_LEN 40
30040 30038 void
30041 30039 sd_panic_for_res_conflict(struct sd_lun *un)
30042 30040 {
30043 30041 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
30044 30042 char path_str[MAXPATHLEN];
30045 30043
30046 30044 (void) snprintf(panic_str, sizeof (panic_str),
30047 30045 "Reservation Conflict\nDisk: %s",
30048 30046 ddi_pathname(SD_DEVINFO(un), path_str));
30049 30047
30050 30048 panic(panic_str);
30051 30049 }
30052 30050
30053 30051 /*
30054 30052 * Note: The following sd_faultinjection_ioctl( ) routines implement
30055 30053 * driver support for handling fault injection for error analysis
30056 30054 * causing faults in multiple layers of the driver.
30057 30055 *
30058 30056 */
30059 30057
30060 30058 #ifdef SD_FAULT_INJECTION
30061 30059 static uint_t sd_fault_injection_on = 0;
30062 30060
/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *              faultinjection ioctls to inject errors into the
 *              layer model. Faults are staged per-slot in the
 *              sd_fi_fifo_* arrays and consumed by sd_faultinjection().
 *
 *   Arguments: cmd - the ioctl cmd received
 *		arg - the arguments from user and returns
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) {

	uint_t i = 0;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/*
		 * Start Injection Session: reset all fifo slots and
		 * indices, and clear the injection log.
		 */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		/* The injection log is protected by its own mutex. */
		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		/* Insert at the current end slot (fifo wraps modulo max). */
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in Queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				/* Copyin failed; discard the slot. */
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store a xb struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				/* Copyin failed; discard the slot. */
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				/* Copyin failed; discard the slot. */
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store a arq struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				/* Copyin failed; discard the slot. */
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/*
		 * Push stored xb, pkt, un, and arq onto fifo by advancing
		 * the end index. arg carries how many slots to commit;
		 * with no arg a single slot is committed.
		 */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retreive");

		sd_fault_injection_on = 0;

		/* Copy out the log (plus its NUL terminator). */
		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid setting
			 * it to NULL for return
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}
30346 30344
30347 30345
30348 30346 /*
30349 30347 * Function: sd_injection_log()
30350 30348 *
30351 30349 * Description: This routine adds buff to the already existing injection log
30352 30350 * for retrieval via faultinjection_ioctl for use in fault
30353 30351 * detection and recovery
30354 30352 *
30355 30353 * Arguments: buf - the string to add to the log
30356 30354 */
30357 30355
30358 30356 static void
30359 30357 sd_injection_log(char *buf, struct sd_lun *un)
30360 30358 {
30361 30359 uint_t len;
30362 30360
30363 30361 ASSERT(un != NULL);
30364 30362 ASSERT(buf != NULL);
30365 30363
30366 30364 mutex_enter(&(un->un_fi_mutex));
30367 30365
30368 30366 len = min(strlen(buf), 255);
30369 30367 /* Add logged value to Injection log to be returned later */
30370 30368 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30371 30369 uint_t offset = strlen((char *)un->sd_fi_log);
30372 30370 char *destp = (char *)un->sd_fi_log + offset;
30373 30371 int i;
30374 30372 for (i = 0; i < len; i++) {
30375 30373 *destp++ = *buf++;
30376 30374 }
30377 30375 un->sd_fi_buf_len += len;
30378 30376 un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30379 30377 }
30380 30378
30381 30379 mutex_exit(&(un->un_fi_mutex));
30382 30380 }
30383 30381
30384 30382
/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario staged by
 *		sd_faultinjection_ioctl(). Called from sdintr().
 *
 *   Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off, or nothing is staged, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	SD_INFO(SD_LOG_SDTEST, un,
	    "sd_faultinjection: is working for copying\n");

	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		if (fi_pkt->pkt_cdbp != 0xff)
			SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}
	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		if (fi_xb->xb_retry_count != 0)
			SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		/*
		 * if (fi_xb->xb_sense_data[0] != -1) {
		 *	bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
		 *	    SENSE_LENGTH);
		 * }
		 */
		bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_code, "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_key, "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_add_code, "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_qual_code, "es_qual_code");
		struct scsi_extended_sense *esp;
		esp = (struct scsi_extended_sense *)xb->xb_sense_data;
		esp->es_class = CLASS_EXTENDED_SENSE;
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee to set to NULL;
	 * since we use these pointers to determine if we set
	 * values or not, confirm they are always
	 * NULL after free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	/* consume the slot */
	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}
30537 30535
30538 30536 #endif /* SD_FAULT_INJECTION */
30539 30537
30540 30538 /*
30541 30539 * This routine is invoked in sd_unit_attach(). Before calling it, the
30542 30540 * properties in conf file should be processed already, and "hotpluggable"
30543 30541 * property was processed also.
30544 30542 *
30545 30543 * The sd driver distinguishes 3 different type of devices: removable media,
30546 30544 * non-removable media, and hotpluggable. Below the differences are defined:
30547 30545 *
30548 30546 * 1. Device ID
30549 30547 *
30550 30548 * The device ID of a device is used to identify this device. Refer to
30551 30549 * ddi_devid_register(9F).
30552 30550 *
30553 30551 * For a non-removable media disk device which can provide 0x80 or 0x83
30554 30552 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
30555 30553 * device ID is created to identify this device. For other non-removable
 * media devices, a default device ID is created only if this device has
 * at least 2 alternate cylinders. Otherwise, this device has no devid.
30558 30556 *
30559 30557 * -------------------------------------------------------
30560 30558 * removable media hotpluggable | Can Have Device ID
30561 30559 * -------------------------------------------------------
30562 30560 * false false | Yes
30563 30561 * false true | Yes
30564 30562 * true x | No
30565 30563 * ------------------------------------------------------
30566 30564 *
30567 30565 *
30568 30566 * 2. SCSI group 4 commands
30569 30567 *
30570 30568 * In SCSI specs, only some commands in group 4 command set can use
30571 30569 * 8-byte addresses that can be used to access >2TB storage spaces.
30572 30570 * Other commands have no such capability. Without supporting group4,
30573 30571 * it is impossible to make full use of storage spaces of a disk with
30574 30572 * capacity larger than 2TB.
30575 30573 *
30576 30574 * -----------------------------------------------
30577 30575 * removable media hotpluggable LP64 | Group
30578 30576 * -----------------------------------------------
30579 30577 * false false false | 1
30580 30578 * false false true | 4
30581 30579 * false true false | 1
30582 30580 * false true true | 4
30583 30581 * true x x | 5
30584 30582 * -----------------------------------------------
30585 30583 *
30586 30584 *
30587 30585 * 3. Check for VTOC Label
30588 30586 *
30589 30587 * If a direct-access disk has no EFI label, sd will check if it has a
30590 30588 * valid VTOC label. Now, sd also does that check for removable media
30591 30589 * and hotpluggable devices.
30592 30590 *
30593 30591 * --------------------------------------------------------------
30594 30592 * Direct-Access removable media hotpluggable | Check Label
30595 30593 * -------------------------------------------------------------
30596 30594 * false false false | No
30597 30595 * false false true | No
30598 30596 * false true false | Yes
30599 30597 * false true true | Yes
30600 30598 * true x x | Yes
30601 30599 * --------------------------------------------------------------
30602 30600 *
30603 30601 *
30604 30602 * 4. Building default VTOC label
30605 30603 *
30606 30604 * As section 3 says, sd checks if some kinds of devices have VTOC label.
30607 30605 * If those devices have no valid VTOC label, sd(7d) will attempt to
30608 30606 * create default VTOC for them. Currently sd creates default VTOC label
30609 30607 * for all devices on x86 platform (VTOC_16), but only for removable
30610 30608 * media devices on SPARC (VTOC_8).
30611 30609 *
30612 30610 * -----------------------------------------------------------
30613 30611 * removable media hotpluggable platform | Default Label
30614 30612 * -----------------------------------------------------------
30615 30613 * false false sparc | No
30616 30614 * false true x86 | Yes
30617 30615 * false true sparc | Yes
30618 30616 * true x x | Yes
30619 30617 * ----------------------------------------------------------
30620 30618 *
30621 30619 *
30622 30620 * 5. Supported blocksizes of target devices
30623 30621 *
30624 30622 * Sd supports non-512-byte blocksize for removable media devices only.
30625 30623 * For other devices, only 512-byte blocksize is supported. This may be
30626 30624 * changed in near future because some RAID devices require non-512-byte
30627 30625 * blocksize
30628 30626 *
30629 30627 * -----------------------------------------------------------
30630 30628 * removable media hotpluggable | non-512-byte blocksize
30631 30629 * -----------------------------------------------------------
30632 30630 * false false | No
30633 30631 * false true | No
30634 30632 * true x | Yes
30635 30633 * -----------------------------------------------------------
30636 30634 *
30637 30635 *
30638 30636 * 6. Automatic mount & unmount
30639 30637 *
 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is used to
 * query whether a device is a removable media device. It returns 1 for
 * removable media devices, and 0 for others.
30643 30641 *
30644 30642 * The automatic mounting subsystem should distinguish between the types
30645 30643 * of devices and apply automounting policies to each.
30646 30644 *
30647 30645 *
30648 30646 * 7. fdisk partition management
30649 30647 *
30650 30648 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver
30651 30649 * just supports fdisk partitions on x86 platform. On sparc platform, sd
30652 30650 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
30653 30651 * fdisk partitions on both x86 and SPARC platform.
30654 30652 *
30655 30653 * -----------------------------------------------------------
30656 30654 * platform removable media USB/1394 | fdisk supported
30657 30655 * -----------------------------------------------------------
30658 30656 * x86 X X | true
30659 30657 * ------------------------------------------------------------
30660 30658 * sparc X X | false
30661 30659 * ------------------------------------------------------------
30662 30660 *
30663 30661 *
30664 30662 * 8. MBOOT/MBR
30665 30663 *
30666 30664 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support
30667 30665 * read/write mboot for removable media devices on sparc platform.
30668 30666 *
30669 30667 * -----------------------------------------------------------
30670 30668 * platform removable media USB/1394 | mboot supported
30671 30669 * -----------------------------------------------------------
30672 30670 * x86 X X | true
30673 30671 * ------------------------------------------------------------
30674 30672 * sparc false false | false
30675 30673 * sparc false true | true
30676 30674 * sparc true false | true
30677 30675 * sparc true true | true
30678 30676 * ------------------------------------------------------------
30679 30677 *
30680 30678 *
30681 30679 * 9. error handling during opening device
30682 30680 *
 * If opening a disk device fails, an errno is returned. For some kinds
 * of errors, a different errno is returned depending on whether this
 * device is a removable media device. This brings USB/1394 hard disks
 * in line with expected hard disk behavior. It is not expected that
 * this breaks any application.
30688 30686 *
30689 30687 * ------------------------------------------------------
30690 30688 * removable media hotpluggable | errno
30691 30689 * ------------------------------------------------------
30692 30690 * false false | EIO
30693 30691 * false true | EIO
30694 30692 * true x | ENXIO
30695 30693 * ------------------------------------------------------
30696 30694 *
30697 30695 *
30698 30696 * 11. ioctls: DKIOCEJECT, CDROMEJECT
30699 30697 *
30700 30698 * These IOCTLs are applicable only to removable media devices.
30701 30699 *
30702 30700 * -----------------------------------------------------------
30703 30701 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT
30704 30702 * -----------------------------------------------------------
30705 30703 * false false | No
30706 30704 * false true | No
30707 30705 * true x | Yes
30708 30706 * -----------------------------------------------------------
30709 30707 *
30710 30708 *
30711 30709 * 12. Kstats for partitions
30712 30710 *
30713 30711 * sd creates partition kstat for non-removable media devices. USB and
30714 30712 * Firewire hard disks now have partition kstats
30715 30713 *
30716 30714 * ------------------------------------------------------
30717 30715 * removable media hotpluggable | kstat
30718 30716 * ------------------------------------------------------
30719 30717 * false false | Yes
30720 30718 * false true | Yes
30721 30719 * true x | No
30722 30720 * ------------------------------------------------------
30723 30721 *
30724 30722 *
30725 30723 * 13. Removable media & hotpluggable properties
30726 30724 *
30727 30725 * Sd driver creates a "removable-media" property for removable media
30728 30726 * devices. Parent nexus drivers create a "hotpluggable" property if
30729 30727 * it supports hotplugging.
30730 30728 *
30731 30729 * ---------------------------------------------------------------------
30732 30730 * removable media hotpluggable | "removable-media" " hotpluggable"
30733 30731 * ---------------------------------------------------------------------
30734 30732 * false false | No No
30735 30733 * false true | No Yes
30736 30734 * true false | Yes No
30737 30735 * true true | Yes Yes
30738 30736 * ---------------------------------------------------------------------
30739 30737 *
30740 30738 *
30741 30739 * 14. Power Management
30742 30740 *
30743 30741 * sd only power manages removable media devices or devices that support
30744 30742 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250)
30745 30743 *
30746 30744 * A parent nexus that supports hotplugging can also set "pm-capable"
30747 30745 * if the disk can be power managed.
30748 30746 *
30749 30747 * ------------------------------------------------------------
30750 30748 * removable media hotpluggable pm-capable | power manage
30751 30749 * ------------------------------------------------------------
30752 30750 * false false false | No
30753 30751 * false false true | Yes
30754 30752 * false true false | No
30755 30753 * false true true | Yes
30756 30754 * true x x | Yes
30757 30755 * ------------------------------------------------------------
30758 30756 *
30759 30757 * USB and firewire hard disks can now be power managed independently
30760 30758 * of the framebuffer
30761 30759 *
30762 30760 *
30763 30761 * 15. Support for USB disks with capacity larger than 1TB
30764 30762 *
30765 30763 * Currently, sd doesn't permit a fixed disk device with capacity
30766 30764 * larger than 1TB to be used in a 32-bit operating system environment.
30767 30765 * However, sd doesn't do that for removable media devices. Instead, it
30768 30766 * assumes that removable media devices cannot have a capacity larger
30769 30767 * than 1TB. Therefore, using those devices on 32-bit system is partially
30770 30768 * supported, which can cause some unexpected results.
30771 30769 *
30772 30770 * ---------------------------------------------------------------------
30773 30771 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env
30774 30772 * ---------------------------------------------------------------------
30775 30773 * false false | true | no
30776 30774 * false true | true | no
30777 30775 * true false | true | Yes
30778 30776 * true true | true | Yes
30779 30777 * ---------------------------------------------------------------------
30780 30778 *
30781 30779 *
30782 30780 * 16. Check write-protection at open time
30783 30781 *
30784 30782 * When a removable media device is being opened for writing without NDELAY
30785 30783 * flag, sd will check if this device is writable. If attempting to open
30786 30784 * without NDELAY flag a write-protected device, this operation will abort.
30787 30785 *
30788 30786 * ------------------------------------------------------------
30789 30787 * removable media USB/1394 | WP Check
30790 30788 * ------------------------------------------------------------
30791 30789 * false false | No
30792 30790 * false true | No
30793 30791 * true false | Yes
30794 30792 * true true | Yes
30795 30793 * ------------------------------------------------------------
30796 30794 *
30797 30795 *
30798 30796 * 17. syslog when corrupted VTOC is encountered
30799 30797 *
 * Currently, if an invalid VTOC is encountered, sd only prints a syslog
 * message for fixed SCSI disks.
30802 30800 * ------------------------------------------------------------
30803 30801 * removable media USB/1394 | print syslog
30804 30802 * ------------------------------------------------------------
30805 30803 * false false | Yes
30806 30804 * false true | No
30807 30805 * true false | No
30808 30806 * true true | No
30809 30807 * ------------------------------------------------------------
30810 30808 */
/*
 * Function: sd_set_unit_attributes
 *
 * Description: Initialize the capability flags in the per-target soft state
 *		(struct sd_lun) based on whether the device reports removable
 *		media (inq_rmb), and on the "pm-capable" and
 *		"enable-partition-kstats" properties.  The decision tables in
 *		the preceding block comment describe the resulting behavior.
 *
 * Arguments:	un   - per-target soft state
 *		devi - devinfo node, used for property lookup/creation
 *
 * Context: Attach context (the SD_INFO below logs as "sd_unit_attach").
 *	    NOTE(review): no mutex is taken here — presumably un is not yet
 *	    visible to other threads at this point; confirm against caller.
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_cap;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	/*
	 * Set the sync cache required flag to false.
	 * This would ensure that there is no SYNC CACHE
	 * sent when there are no writes
	 */
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable. And for this kind
		 * of devices, it is possible to change medium after opening
		 * devices. Thus we should support this operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * support non-512-byte blocksize of removable media devices
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * For a removable media device, it is possible to be opened
		 * with NDELAY flag when there is no media in drive, in this
		 * case we don't care if device is writable. But if without
		 * NDELAY flag, we need to check if media is write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * need to start a SCSI watch thread to monitor media state,
		 * when media is being inserted or ejected, notify syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support START_STOP_UNIT command.
		 * Therefore, we'd better check if a device supports it
		 * before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * support eject media ioctl:
		 *		FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we couldn't use this command to check if
		 * a removable media device support power-management.
		 * We assume that they support power-management via
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero length (Boolean) property
		 * removable-media for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if unable to create the property
		 * then do not want the attach to fail altogether. Consistent
		 * with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * create device ID for device
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once it is attached
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to SCSI specification, Sense data has two kinds of
		 * format: fixed format, and descriptor format. At present, we
		 * don't support descriptor format sense data for removable
		 * media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats. The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is set to be false (0),
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist then pm_cap will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1).  In this case,
		 * sd will check the start/stop cycle count log sense page
		 * and power manage the device if the cycle count limit has
		 * not been exceeded.
		 */
		pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
			/*
			 * No property: fall back to LOG_SENSE-based power
			 * management; SPC-4 (inq_ansi == 6) devices may also
			 * use the POWER CONDITION variant unless disabled.
			 */
			un->un_f_log_sense_supported = TRUE;
			if (!un->un_f_power_condition_disabled &&
			    SD_INQUIRY(un)->inq_ansi == 6) {
				un->un_f_power_condition_supported = TRUE;
			}
		} else {
			/*
			 * pm-capable property exists.
			 *
			 * Convert "TRUE" values for pm_cap to
			 * SD_PM_CAPABLE_IS_TRUE to make it easier to check
			 * later. "TRUE" values are any values defined in
			 * inquiry.h.
			 */
			if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				/* SD_PM_CAPABLE_IS_TRUE case */
				un->un_f_pm_supported = TRUE;
				if (!un->un_f_power_condition_disabled &&
				    SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
					un->un_f_power_condition_supported =
					    TRUE;
				}
				if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
					un->un_f_log_sense_supported = TRUE;
					un->un_f_pm_log_sense_smart =
					    SD_PM_CAP_SMART_LOG(pm_cap);
				}
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}
30996 30994
30997 30995 /*
30998 30996 * sd_tg_rdwr:
30999 30997 * Provides rdwr access for cmlb via sd_tgops. The start_block is
31000 30998 * in sys block size, req_length in bytes.
31001 30999 *
31002 31000 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	/* bounce buffer, allocated only when the request is not aligned */
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t	buffer_size = reqlength;
	int rval = 0;
	diskaddr_t	cap;
	uint32_t	lbasize;
	sd_ssc_t	*ssc;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	ssc = sd_ssc_init(un);
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		/*
		 * Target block size unknown: drop the mutex and issue
		 * READ CAPACITY to (re)learn it before translating the
		 * request.
		 */
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			goto done1;
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if ((un->un_f_tgt_blocksize_is_valid == FALSE)) {
			/*
			 * Still invalid after the update; EIO here is a
			 * local decision (no failed command to assess), so
			 * jump past the FMA assessment at done1.
			 */
			mutex_exit(SD_MUTEX(un));
			rval = EIO;
			goto done;
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize, need to re-adjust
		 * blkno and save the index to beginning of dk_label
		 */
		first_byte  = SD_SYSBLOCKS2BYTES(start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1))||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		/*
		 * Read into the bounce buffer if unaligned, then copy the
		 * requested byte range out to the caller's buffer.
		 */
		rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			/*
			 * Unaligned write: read-modify-write.  Read the
			 * covering target blocks, merge the caller's data
			 * at the right byte offset, then write back below.
			 */
			rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				goto done1;
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? dkl: bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	/* Record an FMA assessment for any failed SCSI command above. */
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}
31118 31116
31119 31117
/*
 * Function: sd_tg_getinfo
 *
 * Description: cmlb callback (via sd_tgops) that reports target information:
 *		physical/virtual geometry, capacity, block size, or media
 *		attributes, selected by cmd.  Capacity and block size are
 *		served from cached soft-state values when valid, otherwise
 *		refreshed with READ CAPACITY.
 *
 * Arguments:	devi      - devinfo node used to locate the soft state
 *		cmd       - TG_GETPHYGEOM/TG_GETVIRTGEOM/TG_GETCAPACITY/
 *			    TG_GETBLOCKSIZE/TG_GETATTR
 *		arg       - output buffer; its type depends on cmd
 *		tg_cookie - encodes the SCSI path flag
 *
 * Return Code: 0 on success; ENXIO if no soft state; EIO if the block
 *		info cannot be validated; ENOTTY for unknown cmd; or the
 *		error from the geometry/READ CAPACITY helpers.
 */
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{

	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int path_flag = (int)(uintptr_t)tg_cookie;
	int ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			/* Cached values are valid; use them. */
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			/*
			 * Refresh capacity/block size via READ CAPACITY
			 * (issued without the mutex held), recording an
			 * FMA assessment on failure.
			 */
			sd_ssc_t	*ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		((tg_attribute_t *)arg)->media_is_solid_state =
		    un->un_f_is_solid_state;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);

	}
}
31206 31204
31207 31205 /*
31208 31206 * Function: sd_ssc_ereport_post
31209 31207 *
 * Description: Will be called when the SD driver needs to post an ereport.
31211 31209 *
31212 31210 * Context: Kernel thread or interrupt context.
31213 31211 */
31214 31212
/*
 * DEVID_IF_KNOWN: expands to the "devid" name/type/value triple used in
 * ereport payloads below, substituting the string "unknown" when no devid
 * is available (d == NULL).
 */
#define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
31216 31214
31217 31215 static void
31218 31216 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
31219 31217 {
31220 31218 int uscsi_path_instance = 0;
31221 31219 uchar_t uscsi_pkt_reason;
31222 31220 uint32_t uscsi_pkt_state;
31223 31221 uint32_t uscsi_pkt_statistics;
31224 31222 uint64_t uscsi_ena;
31225 31223 uchar_t op_code;
31226 31224 uint8_t *sensep;
31227 31225 union scsi_cdb *cdbp;
31228 31226 uint_t cdblen = 0;
31229 31227 uint_t senlen = 0;
31230 31228 struct sd_lun *un;
31231 31229 dev_info_t *dip;
31232 31230 char *devid;
31233 31231 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
31234 31232 SSC_FLAGS_INVALID_STATUS |
31235 31233 SSC_FLAGS_INVALID_SENSE |
31236 31234 SSC_FLAGS_INVALID_DATA;
31237 31235 char assessment[16];
31238 31236
31239 31237 ASSERT(ssc != NULL);
31240 31238 ASSERT(ssc->ssc_uscsi_cmd != NULL);
31241 31239 ASSERT(ssc->ssc_uscsi_info != NULL);
31242 31240
31243 31241 un = ssc->ssc_un;
31244 31242 ASSERT(un != NULL);
31245 31243
31246 31244 dip = un->un_sd->sd_dev;
31247 31245
31248 31246 /*
31249 31247 * Get the devid:
31250 31248 * devid will only be passed to non-transport error reports.
31251 31249 */
31252 31250 devid = DEVI(dip)->devi_devid_str;
31253 31251
31254 31252 /*
31255 31253 * If we are syncing or dumping, the command will not be executed
31256 31254 * so we bypass this situation.
31257 31255 */
31258 31256 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
31259 31257 (un->un_state == SD_STATE_DUMPING))
31260 31258 return;
31261 31259
31262 31260 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
31263 31261 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
31264 31262 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
31265 31263 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
31266 31264 uscsi_ena = ssc->ssc_uscsi_info->ui_ena;
31267 31265
31268 31266 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
31269 31267 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;
31270 31268
31271 31269 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */
31272 31270 if (cdbp == NULL) {
31273 31271 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
31274 31272 "sd_ssc_ereport_post meet empty cdb\n");
31275 31273 return;
31276 31274 }
31277 31275
31278 31276 op_code = cdbp->scc_cmd;
31279 31277
31280 31278 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
31281 31279 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
31282 31280 ssc->ssc_uscsi_cmd->uscsi_rqresid);
31283 31281
31284 31282 if (senlen > 0)
31285 31283 ASSERT(sensep != NULL);
31286 31284
31287 31285 /*
31288 31286 * Initialize drv_assess to corresponding values.
31289 31287 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
31290 31288 * on the sense-key returned back.
31291 31289 */
31292 31290 switch (drv_assess) {
31293 31291 case SD_FM_DRV_RECOVERY:
31294 31292 (void) sprintf(assessment, "%s", "recovered");
31295 31293 break;
31296 31294 case SD_FM_DRV_RETRY:
31297 31295 (void) sprintf(assessment, "%s", "retry");
31298 31296 break;
31299 31297 case SD_FM_DRV_NOTICE:
31300 31298 (void) sprintf(assessment, "%s", "info");
31301 31299 break;
31302 31300 case SD_FM_DRV_FATAL:
31303 31301 default:
31304 31302 (void) sprintf(assessment, "%s", "unknown");
31305 31303 }
31306 31304 /*
31307 31305 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
31308 31306 * command, we will post ereport.io.scsi.cmd.disk.recovered.
31309 31307 * driver-assessment will always be "recovered" here.
31310 31308 */
31311 31309 if (drv_assess == SD_FM_DRV_RECOVERY) {
31312 31310 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31313 31311 "cmd.disk.recovered", uscsi_ena, devid, NULL,
31314 31312 DDI_NOSLEEP, NULL,
31315 31313 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31316 31314 DEVID_IF_KNOWN(devid),
31317 31315 "driver-assessment", DATA_TYPE_STRING, assessment,
31318 31316 "op-code", DATA_TYPE_UINT8, op_code,
31319 31317 "cdb", DATA_TYPE_UINT8_ARRAY,
31320 31318 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31321 31319 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31322 31320 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31323 31321 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31324 31322 NULL);
31325 31323 return;
31326 31324 }
31327 31325
31328 31326 /*
31329 31327 * If there is un-expected/un-decodable data, we should post
31330 31328 * ereport.io.scsi.cmd.disk.dev.uderr.
31331 31329 * driver-assessment will be set based on parameter drv_assess.
31332 31330 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
31333 31331 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
31334 31332 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
31335 31333 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
31336 31334 */
31337 31335 if (ssc->ssc_flags & ssc_invalid_flags) {
31338 31336 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
31339 31337 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31340 31338 NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
31341 31339 NULL, DDI_NOSLEEP, NULL,
31342 31340 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31343 31341 DEVID_IF_KNOWN(devid),
31344 31342 "driver-assessment", DATA_TYPE_STRING,
31345 31343 drv_assess == SD_FM_DRV_FATAL ?
31346 31344 "fail" : assessment,
31347 31345 "op-code", DATA_TYPE_UINT8, op_code,
31348 31346 "cdb", DATA_TYPE_UINT8_ARRAY,
31349 31347 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31350 31348 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31351 31349 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31352 31350 "pkt-stats", DATA_TYPE_UINT32,
31353 31351 uscsi_pkt_statistics,
31354 31352 "stat-code", DATA_TYPE_UINT8,
31355 31353 ssc->ssc_uscsi_cmd->uscsi_status,
31356 31354 "un-decode-info", DATA_TYPE_STRING,
31357 31355 ssc->ssc_info,
31358 31356 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31359 31357 senlen, sensep,
31360 31358 NULL);
31361 31359 } else {
31362 31360 /*
31363 31361 * For other type of invalid data, the
31364 31362 * un-decode-value field would be empty because the
31365 31363 * un-decodable content could be seen from upper
31366 31364 * level payload or inside un-decode-info.
31367 31365 */
31368 31366 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31369 31367 NULL,
31370 31368 "cmd.disk.dev.uderr", uscsi_ena, devid,
31371 31369 NULL, DDI_NOSLEEP, NULL,
31372 31370 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31373 31371 DEVID_IF_KNOWN(devid),
31374 31372 "driver-assessment", DATA_TYPE_STRING,
31375 31373 drv_assess == SD_FM_DRV_FATAL ?
31376 31374 "fail" : assessment,
31377 31375 "op-code", DATA_TYPE_UINT8, op_code,
31378 31376 "cdb", DATA_TYPE_UINT8_ARRAY,
31379 31377 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31380 31378 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31381 31379 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31382 31380 "pkt-stats", DATA_TYPE_UINT32,
31383 31381 uscsi_pkt_statistics,
31384 31382 "stat-code", DATA_TYPE_UINT8,
31385 31383 ssc->ssc_uscsi_cmd->uscsi_status,
31386 31384 "un-decode-info", DATA_TYPE_STRING,
31387 31385 ssc->ssc_info,
31388 31386 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31389 31387 0, NULL,
31390 31388 NULL);
31391 31389 }
31392 31390 ssc->ssc_flags &= ~ssc_invalid_flags;
31393 31391 return;
31394 31392 }
31395 31393
31396 31394 if (uscsi_pkt_reason != CMD_CMPLT ||
31397 31395 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
31398 31396 /*
31399 31397 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was
31400 31398 * set inside sd_start_cmds due to errors(bad packet or
31401 31399 * fatal transport error), we should take it as a
31402 31400 * transport error, so we post ereport.io.scsi.cmd.disk.tran.
31403 31401 * driver-assessment will be set based on drv_assess.
31404 31402 * We will set devid to NULL because it is a transport
31405 31403 * error.
31406 31404 */
31407 31405 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
31408 31406 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;
31409 31407
31410 31408 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31411 31409 "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
31412 31410 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31413 31411 DEVID_IF_KNOWN(devid),
31414 31412 "driver-assessment", DATA_TYPE_STRING,
31415 31413 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31416 31414 "op-code", DATA_TYPE_UINT8, op_code,
31417 31415 "cdb", DATA_TYPE_UINT8_ARRAY,
31418 31416 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31419 31417 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31420 31418 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
31421 31419 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31422 31420 NULL);
31423 31421 } else {
31424 31422 /*
31425 31423 * If we got here, we have a completed command, and we need
31426 31424 * to further investigate the sense data to see what kind
31427 31425 * of ereport we should post.
31428 31426 * No ereport is needed if sense-key is KEY_RECOVERABLE_ERROR
31429 31427 * and asc/ascq is "ATA PASS-THROUGH INFORMATION AVAILABLE".
31430 31428 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr if sense-key is
31431 31429 * KEY_MEDIUM_ERROR.
31432 31430 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
31433 31431 * driver-assessment will be set based on the parameter
31434 31432 * drv_assess.
31435 31433 */
31436 31434 if (senlen > 0) {
31437 31435 /*
31438 31436 * Here we have sense data available.
31439 31437 */
31440 31438 uint8_t sense_key = scsi_sense_key(sensep);
31441 31439 uint8_t sense_asc = scsi_sense_asc(sensep);
31442 31440 uint8_t sense_ascq = scsi_sense_ascq(sensep);
31443 31441
31444 31442 if (sense_key == KEY_RECOVERABLE_ERROR &&
31445 31443 sense_asc == 0x00 && sense_ascq == 0x1d)
31446 31444 return;
31447 31445
31448 31446 if (sense_key == KEY_MEDIUM_ERROR) {
31449 31447 /*
31450 31448 * driver-assessment should be "fatal" if
31451 31449 * drv_assess is SD_FM_DRV_FATAL.
31452 31450 */
31453 31451 scsi_fm_ereport_post(un->un_sd,
31454 31452 uscsi_path_instance, NULL,
31455 31453 "cmd.disk.dev.rqs.merr",
31456 31454 uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
31457 31455 FM_VERSION, DATA_TYPE_UINT8,
31458 31456 FM_EREPORT_VERS0,
31459 31457 DEVID_IF_KNOWN(devid),
31460 31458 "driver-assessment",
31461 31459 DATA_TYPE_STRING,
31462 31460 drv_assess == SD_FM_DRV_FATAL ?
31463 31461 "fatal" : assessment,
31464 31462 "op-code",
31465 31463 DATA_TYPE_UINT8, op_code,
31466 31464 "cdb",
31467 31465 DATA_TYPE_UINT8_ARRAY, cdblen,
31468 31466 ssc->ssc_uscsi_cmd->uscsi_cdb,
31469 31467 "pkt-reason",
31470 31468 DATA_TYPE_UINT8, uscsi_pkt_reason,
31471 31469 "pkt-state",
31472 31470 DATA_TYPE_UINT8, uscsi_pkt_state,
31473 31471 "pkt-stats",
31474 31472 DATA_TYPE_UINT32,
31475 31473 uscsi_pkt_statistics,
31476 31474 "stat-code",
31477 31475 DATA_TYPE_UINT8,
31478 31476 ssc->ssc_uscsi_cmd->uscsi_status,
31479 31477 "key",
31480 31478 DATA_TYPE_UINT8,
31481 31479 scsi_sense_key(sensep),
31482 31480 "asc",
31483 31481 DATA_TYPE_UINT8,
31484 31482 scsi_sense_asc(sensep),
31485 31483 "ascq",
31486 31484 DATA_TYPE_UINT8,
31487 31485 scsi_sense_ascq(sensep),
31488 31486 "sense-data",
31489 31487 DATA_TYPE_UINT8_ARRAY,
31490 31488 senlen, sensep,
31491 31489 "lba",
31492 31490 DATA_TYPE_UINT64,
31493 31491 ssc->ssc_uscsi_info->ui_lba,
31494 31492 NULL);
31495 31493 } else {
31496 31494 /*
31497 31495 * if sense-key == 0x4(hardware
31498 31496 * error), driver-assessment should
31499 31497 * be "fatal" if drv_assess is
31500 31498 * SD_FM_DRV_FATAL.
31501 31499 */
31502 31500 scsi_fm_ereport_post(un->un_sd,
31503 31501 uscsi_path_instance, NULL,
31504 31502 "cmd.disk.dev.rqs.derr",
31505 31503 uscsi_ena, devid,
31506 31504 NULL, DDI_NOSLEEP, NULL,
31507 31505 FM_VERSION,
31508 31506 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31509 31507 DEVID_IF_KNOWN(devid),
31510 31508 "driver-assessment",
31511 31509 DATA_TYPE_STRING,
31512 31510 drv_assess == SD_FM_DRV_FATAL ?
31513 31511 (sense_key == 0x4 ?
31514 31512 "fatal" : "fail") : assessment,
31515 31513 "op-code",
31516 31514 DATA_TYPE_UINT8, op_code,
31517 31515 "cdb",
31518 31516 DATA_TYPE_UINT8_ARRAY, cdblen,
31519 31517 ssc->ssc_uscsi_cmd->uscsi_cdb,
31520 31518 "pkt-reason",
31521 31519 DATA_TYPE_UINT8, uscsi_pkt_reason,
31522 31520 "pkt-state",
31523 31521 DATA_TYPE_UINT8, uscsi_pkt_state,
31524 31522 "pkt-stats",
31525 31523 DATA_TYPE_UINT32,
31526 31524 uscsi_pkt_statistics,
31527 31525 "stat-code",
31528 31526 DATA_TYPE_UINT8,
31529 31527 ssc->ssc_uscsi_cmd->uscsi_status,
31530 31528 "key",
31531 31529 DATA_TYPE_UINT8,
31532 31530 scsi_sense_key(sensep),
31533 31531 "asc",
31534 31532 DATA_TYPE_UINT8,
31535 31533 scsi_sense_asc(sensep),
31536 31534 "ascq",
31537 31535 DATA_TYPE_UINT8,
31538 31536 scsi_sense_ascq(sensep),
31539 31537 "sense-data",
31540 31538 DATA_TYPE_UINT8_ARRAY,
31541 31539 senlen, sensep,
31542 31540 NULL);
31543 31541 }
31544 31542 } else {
31545 31543 /*
31546 31544 * For stat_code == STATUS_GOOD, this is not a
31547 31545 * hardware error.
31548 31546 */
31549 31547 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
31550 31548 return;
31551 31549
31552 31550 /*
31553 31551 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the
31554 31552 * stat-code but with sense data unavailable.
31555 31553 * driver-assessment will be set based on parameter
31556 31554 * drv_assess.
31557 31555 */
31558 31556 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31559 31557 NULL,
31560 31558 "cmd.disk.dev.serr", uscsi_ena,
31561 31559 devid, NULL, DDI_NOSLEEP, NULL,
31562 31560 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31563 31561 DEVID_IF_KNOWN(devid),
31564 31562 "driver-assessment", DATA_TYPE_STRING,
31565 31563 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31566 31564 "op-code", DATA_TYPE_UINT8, op_code,
31567 31565 "cdb",
31568 31566 DATA_TYPE_UINT8_ARRAY,
31569 31567 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31570 31568 "pkt-reason",
31571 31569 DATA_TYPE_UINT8, uscsi_pkt_reason,
31572 31570 "pkt-state",
31573 31571 DATA_TYPE_UINT8, uscsi_pkt_state,
31574 31572 "pkt-stats",
31575 31573 DATA_TYPE_UINT32, uscsi_pkt_statistics,
31576 31574 "stat-code",
31577 31575 DATA_TYPE_UINT8,
31578 31576 ssc->ssc_uscsi_cmd->uscsi_status,
31579 31577 NULL);
31580 31578 }
31581 31579 }
31582 31580 }
31583 31581
31584 31582 /*
31585 31583 * Function: sd_ssc_extract_info
31586 31584 *
31587 31585 * Description: Extract information available to help generate ereport.
31588 31586 *
31589 31587 * Context: Kernel thread or interrupt context.
31590 31588 */
31591 31589 static void
31592 31590 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
31593 31591 struct buf *bp, struct sd_xbuf *xp)
31594 31592 {
31595 31593 size_t senlen = 0;
31596 31594 union scsi_cdb *cdbp;
31597 31595 int path_instance;
31598 31596 /*
31599 31597 * Need scsi_cdb_size array to determine the cdb length.
31600 31598 */
31601 31599 extern uchar_t scsi_cdb_size[];
31602 31600
31603 31601 ASSERT(un != NULL);
31604 31602 ASSERT(pktp != NULL);
31605 31603 ASSERT(bp != NULL);
31606 31604 ASSERT(xp != NULL);
31607 31605 ASSERT(ssc != NULL);
31608 31606 ASSERT(mutex_owned(SD_MUTEX(un)));
31609 31607
31610 31608 /*
31611 31609 * Transfer the cdb buffer pointer here.
31612 31610 */
31613 31611 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
31614 31612
31615 31613 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
31616 31614 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;
31617 31615
31618 31616 /*
31619 31617 * Transfer the sense data buffer pointer if sense data is available,
31620 31618 * calculate the sense data length first.
31621 31619 */
31622 31620 if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
31623 31621 (xp->xb_sense_state & STATE_ARQ_DONE)) {
31624 31622 /*
31625 31623 * For arq case, we will enter here.
31626 31624 */
31627 31625 if (xp->xb_sense_state & STATE_XARQ_DONE) {
31628 31626 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
31629 31627 } else {
31630 31628 senlen = SENSE_LENGTH;
31631 31629 }
31632 31630 } else {
31633 31631 /*
31634 31632 * For non-arq case, we will enter this branch.
31635 31633 */
31636 31634 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
31637 31635 (xp->xb_sense_state & STATE_XFERRED_DATA)) {
31638 31636 senlen = SENSE_LENGTH - xp->xb_sense_resid;
31639 31637 }
31640 31638
31641 31639 }
31642 31640
31643 31641 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
31644 31642 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
31645 31643 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;
31646 31644
31647 31645 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
31648 31646
31649 31647 /*
31650 31648 * Only transfer path_instance when scsi_pkt was properly allocated.
31651 31649 */
31652 31650 path_instance = pktp->pkt_path_instance;
31653 31651 if (scsi_pkt_allocated_correctly(pktp) && path_instance)
31654 31652 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
31655 31653 else
31656 31654 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;
31657 31655
31658 31656 /*
31659 31657 * Copy in the other fields we may need when posting ereport.
31660 31658 */
31661 31659 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
31662 31660 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
31663 31661 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
31664 31662 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
31665 31663
31666 31664 /*
31667 31665 * For partially read/write command, we will not create ena
31668 31666 * in case of a successful command be reconized as recovered.
31669 31667 */
31670 31668 if ((pktp->pkt_reason == CMD_CMPLT) &&
31671 31669 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
31672 31670 (senlen == 0)) {
31673 31671 return;
31674 31672 }
31675 31673
31676 31674 /*
31677 31675 * To associate ereports of a single command execution flow, we
31678 31676 * need a shared ena for a specific command.
31679 31677 */
31680 31678 if (xp->xb_ena == 0)
31681 31679 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
31682 31680 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
31683 31681 }
31684 31682
31685 31683
31686 31684 /*
31687 31685 * Function: sd_check_solid_state
31688 31686 *
31689 31687 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
31690 31688 * supports VPD page 0xb1, sd examines the MEDIUM ROTATION
31691 31689 * RATE. If the MEDIUM ROTATION RATE is 1, sd assumes the
31692 31690 * device is a solid state drive.
31693 31691 *
31694 31692 * Context: Kernel thread or interrupt context.
31695 31693 */
31696 31694
31697 31695 static void
31698 31696 sd_check_solid_state(sd_ssc_t *ssc)
31699 31697 {
31700 31698 int rval = 0;
31701 31699 uchar_t *inqb1 = NULL;
31702 31700 size_t inqb1_len = MAX_INQUIRY_SIZE;
31703 31701 size_t inqb1_resid = 0;
31704 31702 struct sd_lun *un;
31705 31703
31706 31704 ASSERT(ssc != NULL);
31707 31705 un = ssc->ssc_un;
31708 31706 ASSERT(un != NULL);
31709 31707 ASSERT(!mutex_owned(SD_MUTEX(un)));
31710 31708
31711 31709 mutex_enter(SD_MUTEX(un));
31712 31710 un->un_f_is_solid_state = FALSE;
31713 31711
31714 31712 if (ISCD(un)) {
31715 31713 mutex_exit(SD_MUTEX(un));
31716 31714 return;
31717 31715 }
31718 31716
31719 31717 if (sd_check_vpd_page_support(ssc) == 0 &&
31720 31718 un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
31721 31719 mutex_exit(SD_MUTEX(un));
31722 31720 /* collect page b1 data */
31723 31721 inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);
31724 31722
31725 31723 rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
31726 31724 0x01, 0xB1, &inqb1_resid);
31727 31725
31728 31726 if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
31729 31727 SD_TRACE(SD_LOG_COMMON, un,
31730 31728 "sd_check_solid_state: \
31731 31729 successfully get VPD page: %x \
31732 31730 PAGE LENGTH: %x BYTE 4: %x \
31733 31731 BYTE 5: %x", inqb1[1], inqb1[3], inqb1[4],
31734 31732 inqb1[5]);
31735 31733
31736 31734 mutex_enter(SD_MUTEX(un));
31737 31735 /*
31738 31736 * Check the MEDIUM ROTATION RATE. If it is set
31739 31737 * to 1, the device is a solid state drive.
31740 31738 */
31741 31739 if (inqb1[4] == 0 && inqb1[5] == 1) {
31742 31740 un->un_f_is_solid_state = TRUE;
31743 31741 /* solid state drives don't need disksort */
31744 31742 un->un_f_disksort_disabled = TRUE;
31745 31743 }
31746 31744 mutex_exit(SD_MUTEX(un));
31747 31745 } else if (rval != 0) {
31748 31746 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
31749 31747 }
31750 31748
31751 31749 kmem_free(inqb1, inqb1_len);
31752 31750 } else {
31753 31751 mutex_exit(SD_MUTEX(un));
31754 31752 }
31755 31753 }
31756 31754
/*
 * Function: sd_check_emulation_mode
 *
 * Description: Check whether the SSD is at emulation mode
 *		by issuing READ_CAPACITY_16 to see whether
 *		we can get physical block size of the drive.
 *		Updates un_phy_blocksize and, for known flash devices
 *		whose physical and target block sizes differ, enables
 *		read-modify-write (un_f_enable_rmw).
 *
 * Context: Kernel thread or interrupt context.
 */

static void
sd_check_emulation_mode(sd_ssc_t *ssc)
{
	int rval = 0;
	uint64_t capacity;
	uint_t lbasize;
	uint_t pbsize;
	int i;
	int devid_len;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	/* CD/DVD devices are never treated as emulated SSDs. */
	if (ISCD(un)) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (un->un_f_descr_format_supported) {
		/* Drop the mutex across the (possibly blocking) command. */
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
		    &pbsize, SD_PATH_DIRECT);
		mutex_enter(SD_MUTEX(un));

		if (rval != 0) {
			/* Command failed; fall back to the default size. */
			un->un_phy_blocksize = DEV_BSIZE;
		} else {
			/*
			 * NOTE(review): ISP2(pbsize % DEV_BSIZE) looks
			 * suspect -- it tests whether the *remainder* is a
			 * power of two (and ISP2(0) is true for an exact
			 * multiple), rather than validating pbsize itself.
			 * Confirm the intended validity check before
			 * changing; behavior preserved as-is here.
			 */
			if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) {
				un->un_phy_blocksize = DEV_BSIZE;
			} else if (pbsize > un->un_phy_blocksize) {
				/*
				 * Don't reset the physical blocksize
				 * unless we've detected a larger value.
				 */
				un->un_phy_blocksize = pbsize;
			}
		}
	}

	/*
	 * Devices on the known-flash list get a fixed physical block size
	 * (SSD_SECSIZE); if that differs from the reported target block
	 * size, enable read-modify-write for unaligned I/O.
	 */
	for (i = 0; i < sd_flash_dev_table_size; i++) {
		devid_len = (int)strlen(sd_flash_dev_table[i]);
		if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
		    == SD_SUCCESS) {
			un->un_phy_blocksize = SSD_SECSIZE;
			if (un->un_f_is_solid_state &&
			    un->un_phy_blocksize != un->un_tgt_blocksize)
				un->un_f_enable_rmw = TRUE;
		}
	}

	mutex_exit(SD_MUTEX(un));
}
↓ open down ↓ |
19298 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX