Print this page
5253 kmem_alloc/kmem_zalloc won't fail with KM_SLEEP
5254 getrbuf won't fail with KM_SLEEP
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_xioctl.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_xioctl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /* Copyright 2010 QLogic Corporation */
23 23
24 24 /*
25 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 26 */
27 27
28 28 /*
29 29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
30 30 */
31 31
32 32 #pragma ident "Copyright 2010 QLogic Corporation; ql_xioctl.c"
33 33
34 34 /*
35 35 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
36 36 *
37 37 * ***********************************************************************
38 38 * * **
39 39 * * NOTICE **
40 40 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
41 41 * * ALL RIGHTS RESERVED **
42 42 * * **
43 43 * ***********************************************************************
44 44 *
45 45 */
46 46
47 47 #include <ql_apps.h>
48 48 #include <ql_api.h>
49 49 #include <ql_debug.h>
50 50 #include <ql_init.h>
51 51 #include <ql_iocb.h>
52 52 #include <ql_ioctl.h>
53 53 #include <ql_mbx.h>
54 54 #include <ql_xioctl.h>
55 55
56 56 /*
57 57 * Local data
58 58 */
59 59
60 60 /*
61 61 * Local prototypes
62 62 */
63 63 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
64 64 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
65 65 boolean_t (*)(EXT_IOCTL *));
66 66 static boolean_t ql_validate_signature(EXT_IOCTL *);
67 67 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
68 68 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
69 69 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
70 70 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
71 71 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
72 72 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
73 73 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
74 74 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
75 75 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
76 76 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
77 77 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
78 78 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
79 79 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
80 80 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
81 81 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
82 82 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
83 83 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
84 84 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
85 85 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
86 86 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
87 87 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
88 88 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
89 89 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
90 90 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
91 91 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
92 92 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
93 93 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
94 94
95 95 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
96 96 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
97 97 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
98 98 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
99 99 uint8_t);
100 100 static uint32_t ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
101 101 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
102 102 static int ql_24xx_flash_desc(ql_adapter_state_t *);
103 103 static int ql_setup_flash(ql_adapter_state_t *);
104 104 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
105 105 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
106 106 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
107 107 uint32_t, int);
108 108 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
109 109 uint8_t);
110 110 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
111 111 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
112 112 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
113 113 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
114 114 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
115 115 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
116 116 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
117 117 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
118 118 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
119 119 static uint32_t ql_setup_led(ql_adapter_state_t *);
120 120 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
121 121 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
122 122 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
123 123 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
124 124 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
125 125 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
126 126 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
127 127 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
128 128 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
129 129 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
130 130 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
131 131 static void ql_process_flt(ql_adapter_state_t *, uint32_t);
132 132 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
133 133 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
134 134 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
135 135 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
136 136 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
137 137 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
138 138 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
139 139 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
140 140 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
141 141 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
142 142 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
143 143 static void ql_restart_hba(ql_adapter_state_t *);
144 144 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
145 145 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
146 146 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
147 147 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
148 148 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
149 149 static void ql_update_flash_caches(ql_adapter_state_t *);
150 150 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
151 151 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
152 152 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int);
153 153 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int);
154 154 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int);
155 155 static int ql_set_loop_point(ql_adapter_state_t *, uint16_t);
156 156
157 157 /* ******************************************************************** */
158 158 /* External IOCTL support. */
159 159 /* ******************************************************************** */
160 160
161 161 /*
162 162 * ql_alloc_xioctl_resource
163 163 * Allocates resources needed by module code.
164 164 *
165 165 * Input:
166 166 * ha: adapter state pointer.
167 167 *
168 168 * Returns:
169 169 * SYS_ERRNO
170 170 *
171 171 * Context:
172 172 * Kernel context.
173 173 */
174 174 int
175 175 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
176 176 {
177 177 ql_xioctl_t *xp;
↓ open down ↓ |
177 lines elided |
↑ open up ↑ |
178 178
179 179 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
180 180
181 181 if (ha->xioctl != NULL) {
182 182 QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
183 183 ha->instance);
184 184 return (0);
185 185 }
186 186
187 187 xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
188 - if (xp == NULL) {
189 - EL(ha, "failed, kmem_zalloc\n");
190 - return (ENOMEM);
191 - }
192 188 ha->xioctl = xp;
193 189
194 190 /* Allocate AEN tracking buffer */
195 191 xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
196 192 sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
197 - if (xp->aen_tracking_queue == NULL) {
198 - EL(ha, "failed, kmem_zalloc-2\n");
199 - ql_free_xioctl_resource(ha);
200 - return (ENOMEM);
201 - }
202 193
203 194 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
204 195
205 196 return (0);
206 197 }
207 198
208 199 /*
209 200 * ql_free_xioctl_resource
210 201 * Frees resources used by module code.
211 202 *
212 203 * Input:
213 204 * ha: adapter state pointer.
214 205 *
215 206 * Context:
216 207 * Kernel context.
217 208 */
218 209 void
219 210 ql_free_xioctl_resource(ql_adapter_state_t *ha)
220 211 {
221 212 ql_xioctl_t *xp = ha->xioctl;
222 213
223 214 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
224 215
225 216 if (xp == NULL) {
226 217 QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
227 218 return;
228 219 }
229 220
230 221 if (xp->aen_tracking_queue != NULL) {
231 222 kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
232 223 sizeof (EXT_ASYNC_EVENT));
233 224 xp->aen_tracking_queue = NULL;
234 225 }
235 226
236 227 kmem_free(xp, sizeof (ql_xioctl_t));
237 228 ha->xioctl = NULL;
238 229
239 230 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
240 231 }
241 232
242 233 /*
243 234 * ql_xioctl
244 235 * External IOCTL processing.
245 236 *
246 237 * Input:
247 238 * ha: adapter state pointer.
248 239 * cmd: function to perform
249 240 * arg: data type varies with request
250 241 * mode: flags
251 242 * cred_p: credentials pointer
252 243 * rval_p: pointer to result value
253 244 *
254 245 * Returns:
255 246 * 0: success
256 247 * ENXIO: No such device or address
257 248 * ENOPROTOOPT: Protocol not available
258 249 *
259 250 * Context:
260 251 * Kernel context.
261 252 */
/* ARGSUSED */
int
ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
    cred_t *cred_p, int *rval_p)
{
	int	rval;

	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);

	/*
	 * ENXIO if ql_alloc_xioctl_resource() was never run (or failed);
	 * nothing below can work without the per-adapter xioctl context.
	 */
	if (ha->xioctl == NULL) {
		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
		return (ENXIO);
	}

	/*
	 * All supported external ioctl codes funnel into the common
	 * SAN/Device Management dispatcher; anything else is rejected.
	 */
	switch (cmd) {
	case EXT_CC_QUERY:
	case EXT_CC_SEND_FCCT_PASSTHRU:
	case EXT_CC_REG_AEN:
	case EXT_CC_GET_AEN:
	case EXT_CC_SEND_SCSI_PASSTHRU:
	case EXT_CC_WWPN_TO_SCSIADDR:
	case EXT_CC_SEND_ELS_RNID:
	case EXT_CC_SET_DATA:
	case EXT_CC_GET_DATA:
	case EXT_CC_HOST_IDX:
	case EXT_CC_READ_NVRAM:
	case EXT_CC_UPDATE_NVRAM:
	case EXT_CC_READ_OPTION_ROM:
	case EXT_CC_READ_OPTION_ROM_EX:
	case EXT_CC_UPDATE_OPTION_ROM:
	case EXT_CC_UPDATE_OPTION_ROM_EX:
	case EXT_CC_GET_VPD:
	case EXT_CC_SET_VPD:
	case EXT_CC_LOOPBACK:
	case EXT_CC_GET_FCACHE:
	case EXT_CC_GET_FCACHE_EX:
	case EXT_CC_HOST_DRVNAME:
	case EXT_CC_GET_SFP_DATA:
	case EXT_CC_PORT_PARAM:
	case EXT_CC_GET_PCI_DATA:
	case EXT_CC_GET_FWEXTTRACE:
	case EXT_CC_GET_FWFCETRACE:
	case EXT_CC_GET_VP_CNT_ID:
	case EXT_CC_VPORT_CMD:
	case EXT_CC_ACCESS_FLASH:
	case EXT_CC_RESET_FW:
	case EXT_CC_MENLO_MANAGE_INFO:
		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
		break;
	default:
		/* function not supported. */
		EL(ha, "function=%d not supported\n", cmd);
		rval = ENOPROTOOPT;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
321 312
322 313 /*
323 314 * ql_sdm_ioctl
324 315 * Provides ioctl functions for SAN/Device Management functions
325 316 * AKA External Ioctl functions.
326 317 *
327 318 * Input:
328 319 * ha: adapter state pointer.
329 320 * ioctl_code: ioctl function to perform
330 321 * arg: Pointer to EXT_IOCTL cmd data in application land.
331 322 * mode: flags
332 323 *
333 324 * Returns:
334 325 * 0: success
335 326 * ENOMEM: Alloc of local EXT_IOCTL struct failed.
336 327 * EFAULT: Copyin of caller's EXT_IOCTL struct failed or
337 328 * copyout of EXT_IOCTL status info failed.
338 329 * EINVAL: Signature or version of caller's EXT_IOCTL invalid.
339 330 * EBUSY: Device busy
340 331 *
341 332 * Context:
342 333 * Kernel context.
343 334 */
344 335 static int
345 336 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
346 337 {
347 338 EXT_IOCTL *cmd;
348 339 int rval;
349 340 ql_adapter_state_t *vha;
350 341
351 342 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
352 343
353 344 /* Copy argument structure (EXT_IOCTL) from application land. */
354 345 if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
355 346 ql_validate_signature)) != 0) {
356 347 /*
357 348 * a non-zero value at this time means a problem getting
358 349 * the requested information from application land, just
359 350 * return the error code and hope for the best.
360 351 */
361 352 EL(ha, "failed, sdm_setup\n");
362 353 return (rval);
363 354 }
364 355
365 356 /*
366 357 * Map the physical ha ptr (which the ioctl is called with)
367 358 * to the virtual ha that the caller is addressing.
368 359 */
369 360 if (ha->flags & VP_ENABLED) {
370 361 /* Check that it is within range. */
371 362 if (cmd->HbaSelect > (CFG_IST(ha, CFG_CTRL_2422) ?
372 363 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
373 364 EL(ha, "Invalid HbaSelect vp index: %xh\n",
374 365 cmd->HbaSelect);
375 366 cmd->Status = EXT_STATUS_INVALID_VPINDEX;
376 367 cmd->ResponseLen = 0;
377 368 return (EFAULT);
378 369 }
379 370 /*
380 371 * Special case: HbaSelect == 0 is physical ha
381 372 */
382 373 if (cmd->HbaSelect != 0) {
383 374 vha = ha->vp_next;
384 375 while (vha != NULL) {
385 376 if (vha->vp_index == cmd->HbaSelect) {
386 377 ha = vha;
387 378 break;
388 379 }
389 380 vha = vha->vp_next;
390 381 }
391 382 /*
392 383 * The specified vp index may be valid(within range)
393 384 * but it's not in the list. Currently this is all
394 385 * we can say.
395 386 */
396 387 if (vha == NULL) {
397 388 cmd->Status = EXT_STATUS_INVALID_VPINDEX;
398 389 cmd->ResponseLen = 0;
399 390 return (EFAULT);
400 391 }
401 392 }
402 393 }
403 394
404 395 /*
405 396 * If driver is suspended, stalled, or powered down rtn BUSY
406 397 */
407 398 if (ha->flags & ADAPTER_SUSPENDED ||
408 399 ha->task_daemon_flags & DRIVER_STALL ||
409 400 ha->power_level != PM_LEVEL_D0) {
410 401 EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
411 402 "driver suspended" :
412 403 (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
413 404 "FCA powered down"));
414 405 cmd->Status = EXT_STATUS_BUSY;
415 406 cmd->ResponseLen = 0;
416 407 rval = EBUSY;
417 408
418 409 /* Return results to caller */
419 410 if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
420 411 EL(ha, "failed, sdm_return\n");
421 412 rval = EFAULT;
422 413 }
423 414 return (rval);
424 415 }
425 416
426 417 switch (ioctl_code) {
427 418 case EXT_CC_QUERY_OS:
428 419 ql_query(ha, cmd, mode);
429 420 break;
430 421 case EXT_CC_SEND_FCCT_PASSTHRU_OS:
431 422 ql_fcct(ha, cmd, mode);
432 423 break;
433 424 case EXT_CC_REG_AEN_OS:
434 425 ql_aen_reg(ha, cmd, mode);
435 426 break;
436 427 case EXT_CC_GET_AEN_OS:
437 428 ql_aen_get(ha, cmd, mode);
438 429 break;
439 430 case EXT_CC_GET_DATA_OS:
440 431 ql_get_host_data(ha, cmd, mode);
441 432 break;
442 433 case EXT_CC_SET_DATA_OS:
443 434 ql_set_host_data(ha, cmd, mode);
444 435 break;
445 436 case EXT_CC_SEND_ELS_RNID_OS:
446 437 ql_send_els_rnid(ha, cmd, mode);
447 438 break;
448 439 case EXT_CC_SCSI_PASSTHRU_OS:
449 440 ql_scsi_passthru(ha, cmd, mode);
450 441 break;
451 442 case EXT_CC_WWPN_TO_SCSIADDR_OS:
452 443 ql_wwpn_to_scsiaddr(ha, cmd, mode);
453 444 break;
454 445 case EXT_CC_HOST_IDX_OS:
455 446 ql_host_idx(ha, cmd, mode);
456 447 break;
457 448 case EXT_CC_HOST_DRVNAME_OS:
458 449 ql_host_drvname(ha, cmd, mode);
459 450 break;
460 451 case EXT_CC_READ_NVRAM_OS:
461 452 ql_read_nvram(ha, cmd, mode);
462 453 break;
463 454 case EXT_CC_UPDATE_NVRAM_OS:
464 455 ql_write_nvram(ha, cmd, mode);
465 456 break;
466 457 case EXT_CC_READ_OPTION_ROM_OS:
467 458 case EXT_CC_READ_OPTION_ROM_EX_OS:
468 459 ql_read_flash(ha, cmd, mode);
469 460 break;
470 461 case EXT_CC_UPDATE_OPTION_ROM_OS:
471 462 case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
472 463 ql_write_flash(ha, cmd, mode);
473 464 break;
474 465 case EXT_CC_LOOPBACK_OS:
475 466 ql_diagnostic_loopback(ha, cmd, mode);
476 467 break;
477 468 case EXT_CC_GET_VPD_OS:
478 469 ql_read_vpd(ha, cmd, mode);
479 470 break;
480 471 case EXT_CC_SET_VPD_OS:
481 472 ql_write_vpd(ha, cmd, mode);
482 473 break;
483 474 case EXT_CC_GET_FCACHE_OS:
484 475 ql_get_fcache(ha, cmd, mode);
485 476 break;
486 477 case EXT_CC_GET_FCACHE_EX_OS:
487 478 ql_get_fcache_ex(ha, cmd, mode);
488 479 break;
489 480 case EXT_CC_GET_SFP_DATA_OS:
490 481 ql_get_sfp(ha, cmd, mode);
491 482 break;
492 483 case EXT_CC_PORT_PARAM_OS:
493 484 ql_port_param(ha, cmd, mode);
494 485 break;
495 486 case EXT_CC_GET_PCI_DATA_OS:
496 487 ql_get_pci_data(ha, cmd, mode);
497 488 break;
498 489 case EXT_CC_GET_FWEXTTRACE_OS:
499 490 ql_get_fwexttrace(ha, cmd, mode);
500 491 break;
501 492 case EXT_CC_GET_FWFCETRACE_OS:
502 493 ql_get_fwfcetrace(ha, cmd, mode);
503 494 break;
504 495 case EXT_CC_MENLO_RESET:
505 496 ql_menlo_reset(ha, cmd, mode);
506 497 break;
507 498 case EXT_CC_MENLO_GET_FW_VERSION:
508 499 ql_menlo_get_fw_version(ha, cmd, mode);
509 500 break;
510 501 case EXT_CC_MENLO_UPDATE_FW:
511 502 ql_menlo_update_fw(ha, cmd, mode);
512 503 break;
513 504 case EXT_CC_MENLO_MANAGE_INFO:
514 505 ql_menlo_manage_info(ha, cmd, mode);
515 506 break;
516 507 case EXT_CC_GET_VP_CNT_ID_OS:
517 508 ql_get_vp_cnt_id(ha, cmd, mode);
518 509 break;
519 510 case EXT_CC_VPORT_CMD_OS:
520 511 ql_vp_ioctl(ha, cmd, mode);
521 512 break;
522 513 case EXT_CC_ACCESS_FLASH_OS:
523 514 ql_access_flash(ha, cmd, mode);
524 515 break;
525 516 case EXT_CC_RESET_FW_OS:
526 517 ql_reset_cmd(ha, cmd);
527 518 break;
528 519 default:
529 520 /* function not supported. */
530 521 EL(ha, "failed, function not supported=%d\n", ioctl_code);
531 522
532 523 cmd->Status = EXT_STATUS_INVALID_REQUEST;
533 524 cmd->ResponseLen = 0;
534 525 break;
535 526 }
536 527
537 528 /* Return results to caller */
538 529 if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
539 530 EL(ha, "failed, sdm_return\n");
540 531 return (EFAULT);
541 532 }
542 533
543 534 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
544 535
545 536 return (0);
546 537 }
547 538
548 539 /*
549 540 * ql_sdm_setup
550 541 * Make a local copy of the EXT_IOCTL struct and validate it.
551 542 *
552 543 * Input:
553 544 * ha: adapter state pointer.
554 545 * cmd_struct: Pointer to location to store local adrs of EXT_IOCTL.
555 546 * arg: Address of application EXT_IOCTL cmd data
556 547 * mode: flags
557 548 * val_sig: Pointer to a function to validate the ioctl signature.
558 549 *
559 550 * Returns:
560 551 * 0: success
561 552 * EFAULT: Copy in error of application EXT_IOCTL struct.
562 553 * EINVAL: Invalid version, signature.
563 554 * ENOMEM: Local allocation of EXT_IOCTL failed.
564 555 *
565 556 * Context:
566 557 * Kernel context.
567 558 */
568 559 static int
569 560 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
570 561 int mode, boolean_t (*val_sig)(EXT_IOCTL *))
571 562 {
572 563 int rval;
573 564 EXT_IOCTL *cmd;
574 565
575 566 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
576 567
577 568 /* Allocate local memory for EXT_IOCTL. */
578 569 *cmd_struct = NULL;
579 570 cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
580 571 if (cmd == NULL) {
581 572 EL(ha, "failed, kmem_zalloc\n");
582 573 return (ENOMEM);
583 574 }
584 575 /* Get argument structure. */
585 576 rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
586 577 if (rval != 0) {
587 578 EL(ha, "failed, ddi_copyin\n");
588 579 rval = EFAULT;
589 580 } else {
590 581 /*
591 582 * Check signature and the version.
592 583 * If either are not valid then neither is the
593 584 * structure so don't attempt to return any error status
594 585 * because we can't trust what caller's arg points to.
595 586 * Just return the errno.
596 587 */
597 588 if (val_sig(cmd) == 0) {
598 589 EL(ha, "failed, signature\n");
599 590 rval = EINVAL;
600 591 } else if (cmd->Version > EXT_VERSION) {
601 592 EL(ha, "failed, version\n");
602 593 rval = EINVAL;
603 594 }
604 595 }
605 596
606 597 if (rval == 0) {
607 598 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
608 599 *cmd_struct = cmd;
609 600 cmd->Status = EXT_STATUS_OK;
610 601 cmd->DetailStatus = 0;
611 602 } else {
612 603 kmem_free((void *)cmd, sizeof (EXT_IOCTL));
613 604 }
614 605
615 606 return (rval);
616 607 }
617 608
618 609 /*
619 610 * ql_validate_signature
620 611 * Validate the signature string for an external ioctl call.
621 612 *
622 613 * Input:
623 614 * sg: Pointer to EXT_IOCTL signature to validate.
624 615 *
625 616 * Returns:
626 617 * B_TRUE: Signature is valid.
627 618 * B_FALSE: Signature is NOT valid.
628 619 *
629 620 * Context:
630 621 * Kernel context.
631 622 */
632 623 static boolean_t
633 624 ql_validate_signature(EXT_IOCTL *cmd_struct)
634 625 {
635 626 /*
636 627 * Check signature.
637 628 *
638 629 * If signature is not valid then neither is the rest of
639 630 * the structure (e.g., can't trust it), so don't attempt
640 631 * to return any error status other than the errno.
641 632 */
642 633 if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
643 634 QL_PRINT_2(CE_CONT, "failed,\n");
644 635 return (B_FALSE);
645 636 }
646 637
647 638 return (B_TRUE);
648 639 }
649 640
650 641 /*
651 642 * ql_sdm_return
652 643 * Copies return data/status to application land for
653 644 * ioctl call using the SAN/Device Management EXT_IOCTL call interface.
654 645 *
655 646 * Input:
656 647 * ha: adapter state pointer.
657 648 * cmd: Pointer to kernel copy of requestor's EXT_IOCTL struct.
658 649 * ioctl_code: ioctl function to perform
659 650 * arg: EXT_IOCTL cmd data in application land.
660 651 * mode: flags
661 652 *
662 653 * Returns:
663 654 * 0: success
664 655 * EFAULT: Copy out error.
665 656 *
666 657 * Context:
667 658 * Kernel context.
668 659 */
/* ARGSUSED */
static int
ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
{
	int	rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Copy only the three status fields back into the caller's
	 * EXT_IOCTL; individual handlers have already copied any
	 * response payload to cmd->ResponseAdr.  Failures are OR-ed
	 * together so all three copyouts are always attempted.
	 */
	rval |= ddi_copyout((void *)&cmd->ResponseLen,
	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
	    mode);

	rval |= ddi_copyout((void *)&cmd->Status,
	    (void *)&(((EXT_IOCTL*)arg)->Status),
	    sizeof (cmd->Status), mode);
	rval |= ddi_copyout((void *)&cmd->DetailStatus,
	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
	    sizeof (cmd->DetailStatus), mode);

	/*
	 * cmd (allocated by ql_sdm_setup()) is freed here unconditionally;
	 * callers must not touch it after this call.
	 */
	kmem_free((void *)cmd, sizeof (EXT_IOCTL));

	if (rval != 0) {
		/* Some copyout operation failed */
		EL(ha, "failed, ddi_copyout\n");
		return (EFAULT);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
700 691
701 692 /*
702 693 * ql_query
703 694 * Performs all EXT_CC_QUERY functions.
704 695 *
705 696 * Input:
706 697 * ha: adapter state pointer.
707 698 * cmd: Local EXT_IOCTL cmd struct pointer.
708 699 * mode: flags.
709 700 *
710 701 * Returns:
711 702 * None, request status indicated in cmd->Status.
712 703 *
713 704 * Context:
714 705 * Kernel context.
715 706 */
static void
ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
	    cmd->SubCode);

	/* case off on command subcode */
	switch (cmd->SubCode) {
	case EXT_SC_QUERY_HBA_NODE:
		ql_qry_hba_node(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_HBA_PORT:
		ql_qry_hba_port(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DISC_PORT:
		ql_qry_disc_port(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DISC_TGT:
		ql_qry_disc_tgt(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DRIVER:
		ql_qry_driver(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_FW:
		ql_qry_fw(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_CHIP:
		ql_qry_chip(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_CNA_PORT:
		ql_qry_cna_port(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_ADAPTER_VERSIONS:
		ql_qry_adapter_versions(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DISC_LUN:
		/* Deliberately unsupported; falls through to default. */
	default:
		/* function not supported. */
		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
		EL(ha, "failed, Unsupported Subcode=%xh\n",
		    cmd->SubCode);
		break;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
762 753
763 754 /*
764 755 * ql_qry_hba_node
765 756 * Performs EXT_SC_QUERY_HBA_NODE subfunction.
766 757 *
767 758 * Input:
768 759 * ha: adapter state pointer.
769 760 * cmd: EXT_IOCTL cmd struct pointer.
770 761 * mode: flags.
771 762 *
772 763 * Returns:
773 764 * None, request status indicated in cmd->Status.
774 765 *
775 766 * Context:
776 767 * Kernel context.
777 768 */
static void
ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_NODE	tmp_node = {0};
	uint_t		len;
	caddr_t		bufp;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Caller's buffer must hold a whole EXT_HBA_NODE. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
		    "Len=%xh\n", cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
	    EXT_DEF_WWN_NAME_SIZE);

	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");

	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);

	/* Serial number is derived from the low 3 bytes of the WWNN. */
	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);

	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);

	/* SBUS cards append the FPGA revision to the driver version. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		size_t		verlen;
		uint16_t	w;
		char		*tmpptr;

		verlen = strlen((char *)(tmp_node.DriverVersion));
		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
			EL(ha, "failed, No room for fpga version string\n");
		} else {
			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
			    (uint16_t *)
			    (ha->sbus_fpga_iobase + FPGA_REVISION));

			/*
			 * NOTE(review): the address of an array element is
			 * never NULL, so this check is dead code.
			 */
			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
			if (tmpptr == NULL) {
				EL(ha, "Unable to insert fpga version str\n");
			} else {
				(void) sprintf(tmpptr, "%d.%d",
				    ((w & 0xf0) >> 4), (w & 0x0f));
				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
			}
		}
	}

	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/* Older (non-24xx/25xx/81xx) firmware encodes an attribute suffix. */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		switch (ha->fw_attributes) {
		case FWATTRIB_EF:
			(void) strcat((char *)(tmp_node.FWVersion), " EF");
			break;
		case FWATTRIB_TP:
			(void) strcat((char *)(tmp_node.FWVersion), " TP");
			break;
		case FWATTRIB_IP:
			(void) strcat((char *)(tmp_node.FWVersion), " IP");
			break;
		case FWATTRIB_IPX:
			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
			break;
		case FWATTRIB_FL:
			(void) strcat((char *)(tmp_node.FWVersion), " FL");
			break;
		case FWATTRIB_FPX:
			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
			break;
		default:
			break;
		}
	}

	/* FCode version. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		/* Truncate with explicit NUL if the property is too long. */
		if (len < EXT_DEF_MAX_STR_SIZE) {
			bcopy(bufp, tmp_node.OptRomVersion, len);
		} else {
			bcopy(bufp, tmp_node.OptRomVersion,
			    EXT_DEF_MAX_STR_SIZE - 1);
			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
			    '\0';
		}
		/* ddi_getlongprop() allocated bufp; release it. */
		kmem_free(bufp, len);
	} else {
		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
	}
	tmp_node.PortCount = 1;
	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;

	/* Hand the completed node record back to application land. */
	if (ddi_copyout((void *)&tmp_node,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_NODE), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
893 884
894 885 /*
895 886 * ql_qry_hba_port
896 887 * Performs EXT_SC_QUERY_HBA_PORT subfunction.
897 888 *
898 889 * Input:
899 890 * ha: adapter state pointer.
900 891 * cmd: EXT_IOCTL cmd struct pointer.
901 892 * mode: flags.
902 893 *
903 894 * Returns:
904 895 * None, request status indicated in cmd->Status.
905 896 *
906 897 * Context:
907 898 * Kernel context.
908 899 */
909 900 static void
910 901 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
911 902 {
912 903 ql_link_t *link;
913 904 ql_tgt_t *tq;
914 905 ql_mbx_data_t mr;
915 906 EXT_HBA_PORT tmp_port = {0};
916 907 int rval;
917 908 uint16_t port_cnt, tgt_cnt, index;
918 909
919 910 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
920 911
921 912 if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
922 913 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
923 914 cmd->DetailStatus = sizeof (EXT_HBA_PORT);
924 915 EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
925 916 cmd->ResponseLen);
926 917 cmd->ResponseLen = 0;
927 918 return;
928 919 }
929 920
930 921 /* fill in the values */
931 922
932 923 bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
933 924 EXT_DEF_WWN_NAME_SIZE);
934 925 tmp_port.Id[0] = 0;
935 926 tmp_port.Id[1] = ha->d_id.b.domain;
936 927 tmp_port.Id[2] = ha->d_id.b.area;
937 928 tmp_port.Id[3] = ha->d_id.b.al_pa;
938 929
939 930 /* For now we are initiator only driver */
940 931 tmp_port.Type = EXT_DEF_INITIATOR_DEV;
941 932
942 933 if (ha->task_daemon_flags & LOOP_DOWN) {
943 934 tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
944 935 } else if (DRIVER_SUSPENDED(ha)) {
945 936 tmp_port.State = EXT_DEF_HBA_SUSPENDED;
946 937 } else {
947 938 tmp_port.State = EXT_DEF_HBA_OK;
948 939 }
949 940
950 941 if (ha->flags & POINT_TO_POINT) {
951 942 tmp_port.Mode = EXT_DEF_P2P_MODE;
952 943 } else {
953 944 tmp_port.Mode = EXT_DEF_LOOP_MODE;
954 945 }
955 946 /*
956 947 * fill in the portspeed values.
957 948 *
958 949 * default to not yet negotiated state
959 950 */
960 951 tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
961 952
962 953 if (tmp_port.State == EXT_DEF_HBA_OK) {
963 954 switch (ha->iidma_rate) {
964 955 case IIDMA_RATE_1GB:
965 956 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
966 957 break;
967 958 case IIDMA_RATE_2GB:
968 959 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_2GBIT;
969 960 break;
970 961 case IIDMA_RATE_4GB:
971 962 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_4GBIT;
972 963 break;
973 964 case IIDMA_RATE_8GB:
974 965 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_8GBIT;
975 966 break;
976 967 case IIDMA_RATE_10GB:
977 968 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_10GBIT;
978 969 break;
979 970 default:
980 971 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
981 972 EL(ha, "failed, data rate=%xh\n", mr.mb[1]);
982 973 break;
983 974 }
984 975 }
985 976
986 977 /* Report all supported port speeds */
987 978 if (CFG_IST(ha, CFG_CTRL_25XX)) {
988 979 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
989 980 EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
990 981 EXT_DEF_PORTSPEED_1GBIT);
991 982 /*
992 983 * Correct supported speeds based on type of
993 984 * sfp that is present
994 985 */
995 986 switch (ha->sfp_stat) {
996 987 case 1:
997 988 /* no sfp detected */
998 989 break;
999 990 case 2:
1000 991 case 4:
1001 992 /* 4GB sfp */
1002 993 tmp_port.PortSupportedSpeed &=
1003 994 ~EXT_DEF_PORTSPEED_8GBIT;
1004 995 break;
1005 996 case 3:
1006 997 case 5:
1007 998 /* 8GB sfp */
1008 999 tmp_port.PortSupportedSpeed &=
1009 1000 ~EXT_DEF_PORTSPEED_1GBIT;
1010 1001 break;
1011 1002 default:
1012 1003 EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1013 1004 break;
1014 1005
1015 1006 }
1016 1007 } else if (CFG_IST(ha, CFG_CTRL_8081)) {
1017 1008 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1018 1009 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
1019 1010 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1020 1011 EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1021 1012 } else if (CFG_IST(ha, CFG_CTRL_2300)) {
1022 1013 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1023 1014 EXT_DEF_PORTSPEED_1GBIT);
1024 1015 } else if (CFG_IST(ha, CFG_CTRL_6322)) {
1025 1016 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1026 1017 } else if (CFG_IST(ha, CFG_CTRL_2200)) {
1027 1018 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1028 1019 } else {
1029 1020 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1030 1021 EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1031 1022 }
1032 1023 tmp_port.LinkState2 = LSB(ha->sfp_stat);
1033 1024 port_cnt = 0;
1034 1025 tgt_cnt = 0;
1035 1026
1036 1027 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1037 1028 for (link = ha->dev[index].first; link != NULL;
1038 1029 link = link->next) {
1039 1030 tq = link->base_address;
1040 1031
1041 1032 if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1042 1033 continue;
1043 1034 }
1044 1035
1045 1036 port_cnt++;
1046 1037 if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1047 1038 tgt_cnt++;
1048 1039 }
1049 1040 }
1050 1041 }
1051 1042
1052 1043 tmp_port.DiscPortCount = port_cnt;
1053 1044 tmp_port.DiscTargetCount = tgt_cnt;
1054 1045
1055 1046 tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1056 1047
1057 1048 rval = ddi_copyout((void *)&tmp_port,
1058 1049 (void *)(uintptr_t)(cmd->ResponseAdr),
1059 1050 sizeof (EXT_HBA_PORT), mode);
1060 1051 if (rval != 0) {
1061 1052 cmd->Status = EXT_STATUS_COPY_ERR;
1062 1053 cmd->ResponseLen = 0;
1063 1054 EL(ha, "failed, ddi_copyout\n");
1064 1055 } else {
1065 1056 cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1066 1057 QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1067 1058 ha->instance, port_cnt, tgt_cnt);
1068 1059 }
1069 1060 }
1070 1061
1071 1062 /*
1072 1063 * ql_qry_disc_port
1073 1064 * Performs EXT_SC_QUERY_DISC_PORT subfunction.
1074 1065 *
1075 1066 * Input:
1076 1067 * ha: adapter state pointer.
1077 1068 * cmd: EXT_IOCTL cmd struct pointer.
1078 1069 * mode: flags.
1079 1070 *
1080 1071 * cmd->Instance = Port instance in fcport chain.
1081 1072 *
1082 1073 * Returns:
1083 1074 * None, request status indicated in cmd->Status.
1084 1075 *
1085 1076 * Context:
1086 1077 * Kernel context.
1087 1078 */
1088 1079 static void
1089 1080 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1090 1081 {
1091 1082 EXT_DISC_PORT tmp_port = {0};
1092 1083 ql_link_t *link;
1093 1084 ql_tgt_t *tq;
1094 1085 uint16_t index;
1095 1086 uint16_t inst = 0;
1096 1087
1097 1088 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1098 1089
1099 1090 if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1100 1091 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1101 1092 cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1102 1093 EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1103 1094 cmd->ResponseLen);
1104 1095 cmd->ResponseLen = 0;
1105 1096 return;
1106 1097 }
1107 1098
1108 1099 for (link = NULL, index = 0;
1109 1100 index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1110 1101 for (link = ha->dev[index].first; link != NULL;
1111 1102 link = link->next) {
1112 1103 tq = link->base_address;
1113 1104
1114 1105 if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1115 1106 continue;
1116 1107 }
1117 1108 if (inst != cmd->Instance) {
1118 1109 inst++;
1119 1110 continue;
1120 1111 }
1121 1112
1122 1113 /* fill in the values */
1123 1114 bcopy(tq->node_name, tmp_port.WWNN,
1124 1115 EXT_DEF_WWN_NAME_SIZE);
1125 1116 bcopy(tq->port_name, tmp_port.WWPN,
1126 1117 EXT_DEF_WWN_NAME_SIZE);
1127 1118
1128 1119 break;
1129 1120 }
1130 1121 }
1131 1122
1132 1123 if (link == NULL) {
1133 1124 /* no matching device */
1134 1125 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1135 1126 EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1136 1127 cmd->ResponseLen = 0;
1137 1128 return;
1138 1129 }
1139 1130
1140 1131 tmp_port.Id[0] = 0;
1141 1132 tmp_port.Id[1] = tq->d_id.b.domain;
1142 1133 tmp_port.Id[2] = tq->d_id.b.area;
1143 1134 tmp_port.Id[3] = tq->d_id.b.al_pa;
1144 1135
1145 1136 tmp_port.Type = 0;
1146 1137 if (tq->flags & TQF_INITIATOR_DEVICE) {
1147 1138 tmp_port.Type = (uint16_t)(tmp_port.Type |
1148 1139 EXT_DEF_INITIATOR_DEV);
1149 1140 } else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1150 1141 (void) ql_inq_scan(ha, tq, 1);
1151 1142 } else if (tq->flags & TQF_TAPE_DEVICE) {
1152 1143 tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1153 1144 }
1154 1145
1155 1146 if (tq->flags & TQF_FABRIC_DEVICE) {
1156 1147 tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1157 1148 } else {
1158 1149 tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1159 1150 }
1160 1151
1161 1152 tmp_port.Status = 0;
1162 1153 tmp_port.Bus = 0; /* Hard-coded for Solaris */
1163 1154
1164 1155 bcopy(tq->port_name, &tmp_port.TargetId, 8);
1165 1156
1166 1157 if (ddi_copyout((void *)&tmp_port,
1167 1158 (void *)(uintptr_t)(cmd->ResponseAdr),
1168 1159 sizeof (EXT_DISC_PORT), mode) != 0) {
1169 1160 cmd->Status = EXT_STATUS_COPY_ERR;
1170 1161 cmd->ResponseLen = 0;
1171 1162 EL(ha, "failed, ddi_copyout\n");
1172 1163 } else {
1173 1164 cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1174 1165 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1175 1166 }
1176 1167 }
1177 1168
1178 1169 /*
1179 1170 * ql_qry_disc_tgt
1180 1171 * Performs EXT_SC_QUERY_DISC_TGT subfunction.
1181 1172 *
1182 1173 * Input:
1183 1174 * ha: adapter state pointer.
1184 1175 * cmd: EXT_IOCTL cmd struct pointer.
1185 1176 * mode: flags.
1186 1177 *
1187 1178 * cmd->Instance = Port instance in fcport chain.
1188 1179 *
1189 1180 * Returns:
1190 1181 * None, request status indicated in cmd->Status.
1191 1182 *
1192 1183 * Context:
1193 1184 * Kernel context.
1194 1185 */
1195 1186 static void
1196 1187 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1197 1188 {
1198 1189 EXT_DISC_TARGET tmp_tgt = {0};
1199 1190 ql_link_t *link;
1200 1191 ql_tgt_t *tq;
1201 1192 uint16_t index;
1202 1193 uint16_t inst = 0;
1203 1194
1204 1195 QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
1205 1196 cmd->Instance);
1206 1197
1207 1198 if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1208 1199 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1209 1200 cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1210 1201 EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1211 1202 cmd->ResponseLen);
1212 1203 cmd->ResponseLen = 0;
1213 1204 return;
1214 1205 }
1215 1206
1216 1207 /* Scan port list for requested target and fill in the values */
1217 1208 for (link = NULL, index = 0;
1218 1209 index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1219 1210 for (link = ha->dev[index].first; link != NULL;
1220 1211 link = link->next) {
1221 1212 tq = link->base_address;
1222 1213
1223 1214 if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1224 1215 tq->flags & TQF_INITIATOR_DEVICE) {
1225 1216 continue;
1226 1217 }
1227 1218 if (inst != cmd->Instance) {
1228 1219 inst++;
1229 1220 continue;
1230 1221 }
1231 1222
1232 1223 /* fill in the values */
1233 1224 bcopy(tq->node_name, tmp_tgt.WWNN,
1234 1225 EXT_DEF_WWN_NAME_SIZE);
1235 1226 bcopy(tq->port_name, tmp_tgt.WWPN,
1236 1227 EXT_DEF_WWN_NAME_SIZE);
1237 1228
1238 1229 break;
1239 1230 }
1240 1231 }
1241 1232
1242 1233 if (link == NULL) {
1243 1234 /* no matching device */
1244 1235 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1245 1236 cmd->DetailStatus = EXT_DSTATUS_TARGET;
1246 1237 EL(ha, "failed, not found target=%d\n", cmd->Instance);
1247 1238 cmd->ResponseLen = 0;
1248 1239 return;
1249 1240 }
1250 1241 tmp_tgt.Id[0] = 0;
1251 1242 tmp_tgt.Id[1] = tq->d_id.b.domain;
1252 1243 tmp_tgt.Id[2] = tq->d_id.b.area;
1253 1244 tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1254 1245
1255 1246 tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1256 1247
1257 1248 if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1258 1249 (void) ql_inq_scan(ha, tq, 1);
1259 1250 }
1260 1251
1261 1252 tmp_tgt.Type = 0;
1262 1253 if (tq->flags & TQF_TAPE_DEVICE) {
1263 1254 tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1264 1255 }
1265 1256
1266 1257 if (tq->flags & TQF_FABRIC_DEVICE) {
1267 1258 tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1268 1259 } else {
1269 1260 tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1270 1261 }
1271 1262
1272 1263 tmp_tgt.Status = 0;
1273 1264
1274 1265 tmp_tgt.Bus = 0; /* Hard-coded for Solaris. */
1275 1266
1276 1267 bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1277 1268
1278 1269 if (ddi_copyout((void *)&tmp_tgt,
1279 1270 (void *)(uintptr_t)(cmd->ResponseAdr),
1280 1271 sizeof (EXT_DISC_TARGET), mode) != 0) {
1281 1272 cmd->Status = EXT_STATUS_COPY_ERR;
1282 1273 cmd->ResponseLen = 0;
1283 1274 EL(ha, "failed, ddi_copyout\n");
1284 1275 } else {
1285 1276 cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1286 1277 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1287 1278 }
1288 1279 }
1289 1280
1290 1281 /*
1291 1282 * ql_qry_fw
1292 1283 * Performs EXT_SC_QUERY_FW subfunction.
1293 1284 *
1294 1285 * Input:
1295 1286 * ha: adapter state pointer.
1296 1287 * cmd: EXT_IOCTL cmd struct pointer.
1297 1288 * mode: flags.
1298 1289 *
1299 1290 * Returns:
1300 1291 * None, request status indicated in cmd->Status.
1301 1292 *
1302 1293 * Context:
1303 1294 * Kernel context.
1304 1295 */
1305 1296 static void
1306 1297 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1307 1298 {
1308 1299 EXT_FW fw_info = {0};
1309 1300
1310 1301 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1311 1302
1312 1303 if (cmd->ResponseLen < sizeof (EXT_FW)) {
1313 1304 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1314 1305 cmd->DetailStatus = sizeof (EXT_FW);
1315 1306 EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1316 1307 cmd->ResponseLen);
1317 1308 cmd->ResponseLen = 0;
1318 1309 return;
1319 1310 }
1320 1311
1321 1312 (void) sprintf((char *)(fw_info.Version), "%d.%02d.%02d",
1322 1313 ha->fw_major_version, ha->fw_minor_version,
1323 1314 ha->fw_subminor_version);
1324 1315
1325 1316 fw_info.Attrib = ha->fw_attributes;
1326 1317
1327 1318 if (ddi_copyout((void *)&fw_info,
1328 1319 (void *)(uintptr_t)(cmd->ResponseAdr),
1329 1320 sizeof (EXT_FW), mode) != 0) {
1330 1321 cmd->Status = EXT_STATUS_COPY_ERR;
1331 1322 cmd->ResponseLen = 0;
1332 1323 EL(ha, "failed, ddi_copyout\n");
1333 1324 return;
1334 1325 } else {
1335 1326 cmd->ResponseLen = sizeof (EXT_FW);
1336 1327 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1337 1328 }
1338 1329 }
1339 1330
1340 1331 /*
1341 1332 * ql_qry_chip
1342 1333 * Performs EXT_SC_QUERY_CHIP subfunction.
1343 1334 *
1344 1335 * Input:
1345 1336 * ha: adapter state pointer.
1346 1337 * cmd: EXT_IOCTL cmd struct pointer.
1347 1338 * mode: flags.
1348 1339 *
1349 1340 * Returns:
1350 1341 * None, request status indicated in cmd->Status.
1351 1342 *
1352 1343 * Context:
1353 1344 * Kernel context.
1354 1345 */
1355 1346 static void
1356 1347 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1357 1348 {
1358 1349 EXT_CHIP chip = {0};
1359 1350
1360 1351 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1361 1352
1362 1353 if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1363 1354 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1364 1355 cmd->DetailStatus = sizeof (EXT_CHIP);
1365 1356 EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1366 1357 cmd->ResponseLen);
1367 1358 cmd->ResponseLen = 0;
1368 1359 return;
1369 1360 }
1370 1361
1371 1362 chip.VendorId = ha->ven_id;
1372 1363 chip.DeviceId = ha->device_id;
1373 1364 chip.SubVendorId = ha->subven_id;
1374 1365 chip.SubSystemId = ha->subsys_id;
1375 1366 chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1376 1367 chip.IoAddrLen = 0x100;
1377 1368 chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1378 1369 chip.MemAddrLen = 0x100;
1379 1370 chip.ChipRevID = ha->rev_id;
1380 1371 if (ha->flags & FUNCTION_1) {
1381 1372 chip.FuncNo = 1;
1382 1373 }
1383 1374
1384 1375 if (ddi_copyout((void *)&chip,
1385 1376 (void *)(uintptr_t)(cmd->ResponseAdr),
1386 1377 sizeof (EXT_CHIP), mode) != 0) {
1387 1378 cmd->Status = EXT_STATUS_COPY_ERR;
1388 1379 cmd->ResponseLen = 0;
1389 1380 EL(ha, "failed, ddi_copyout\n");
1390 1381 } else {
1391 1382 cmd->ResponseLen = sizeof (EXT_CHIP);
1392 1383 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1393 1384 }
1394 1385 }
1395 1386
1396 1387 /*
1397 1388 * ql_qry_driver
1398 1389 * Performs EXT_SC_QUERY_DRIVER subfunction.
1399 1390 *
1400 1391 * Input:
1401 1392 * ha: adapter state pointer.
1402 1393 * cmd: EXT_IOCTL cmd struct pointer.
1403 1394 * mode: flags.
1404 1395 *
1405 1396 * Returns:
1406 1397 * None, request status indicated in cmd->Status.
1407 1398 *
1408 1399 * Context:
1409 1400 * Kernel context.
1410 1401 */
1411 1402 static void
1412 1403 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1413 1404 {
1414 1405 EXT_DRIVER qd = {0};
1415 1406
1416 1407 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1417 1408
1418 1409 if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1419 1410 cmd->Status = EXT_STATUS_DATA_OVERRUN;
1420 1411 cmd->DetailStatus = sizeof (EXT_DRIVER);
1421 1412 EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1422 1413 cmd->ResponseLen);
1423 1414 cmd->ResponseLen = 0;
1424 1415 return;
1425 1416 }
1426 1417
1427 1418 (void) strcpy((void *)&qd.Version[0], QL_VERSION);
1428 1419 qd.NumOfBus = 1; /* Fixed for Solaris */
1429 1420 qd.TargetsPerBus = (uint16_t)
1430 1421 (CFG_IST(ha, (CFG_CTRL_24258081 | CFG_EXT_FW_INTERFACE)) ?
1431 1422 MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1432 1423 qd.LunsPerTarget = 2030;
1433 1424 qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1434 1425 qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1435 1426
1436 1427 if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1437 1428 sizeof (EXT_DRIVER), mode) != 0) {
1438 1429 cmd->Status = EXT_STATUS_COPY_ERR;
1439 1430 cmd->ResponseLen = 0;
1440 1431 EL(ha, "failed, ddi_copyout\n");
1441 1432 } else {
1442 1433 cmd->ResponseLen = sizeof (EXT_DRIVER);
1443 1434 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1444 1435 }
1445 1436 }
1446 1437
1447 1438 /*
1448 1439 * ql_fcct
1449 1440 * IOCTL management server FC-CT passthrough.
1450 1441 *
1451 1442 * Input:
1452 1443 * ha: adapter state pointer.
1453 1444 * cmd: User space CT arguments pointer.
1454 1445 * mode: flags.
1455 1446 *
1456 1447 * Returns:
1457 1448 * None, request status indicated in cmd->Status.
1458 1449 *
1459 1450 * Context:
1460 1451 * Kernel context.
1461 1452 */
1462 1453 static void
1463 1454 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1464 1455 {
1465 1456 ql_mbx_iocb_t *pkt;
1466 1457 ql_mbx_data_t mr;
1467 1458 dma_mem_t *dma_mem;
1468 1459 caddr_t pld;
1469 1460 uint32_t pkt_size, pld_byte_cnt, *long_ptr;
1470 1461 int rval;
1471 1462 ql_ct_iu_preamble_t *ct;
1472 1463 ql_xioctl_t *xp = ha->xioctl;
1473 1464 ql_tgt_t tq;
1474 1465 uint16_t comp_status, loop_id;
1475 1466
1476 1467 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1477 1468
1478 1469 /* Get CT argument structure. */
1479 1470 if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1480 1471 EL(ha, "failed, No switch\n");
1481 1472 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1482 1473 cmd->ResponseLen = 0;
1483 1474 return;
1484 1475 }
1485 1476
1486 1477 if (DRIVER_SUSPENDED(ha)) {
1487 1478 EL(ha, "failed, LOOP_NOT_READY\n");
1488 1479 cmd->Status = EXT_STATUS_BUSY;
1489 1480 cmd->ResponseLen = 0;
1490 1481 return;
1491 1482 }
1492 1483
1493 1484 /* Login management server device. */
1494 1485 if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1495 1486 tq.d_id.b.al_pa = 0xfa;
1496 1487 tq.d_id.b.area = 0xff;
1497 1488 tq.d_id.b.domain = 0xff;
1498 1489 tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1499 1490 MANAGEMENT_SERVER_24XX_LOOP_ID :
1500 1491 MANAGEMENT_SERVER_LOOP_ID);
1501 1492 rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1502 1493 if (rval != QL_SUCCESS) {
1503 1494 EL(ha, "failed, server login\n");
1504 1495 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1505 1496 cmd->ResponseLen = 0;
1506 1497 return;
1507 1498 } else {
1508 1499 xp->flags |= QL_MGMT_SERVER_LOGIN;
1509 1500 }
1510 1501 }
1511 1502
1512 1503 QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1513 1504 QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1514 1505
1515 1506 /* Allocate a DMA Memory Descriptor */
1516 1507 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1517 1508 if (dma_mem == NULL) {
1518 1509 EL(ha, "failed, kmem_zalloc\n");
1519 1510 cmd->Status = EXT_STATUS_NO_MEMORY;
1520 1511 cmd->ResponseLen = 0;
1521 1512 return;
1522 1513 }
↓ open down ↓ |
1311 lines elided |
↑ open up ↑ |
1523 1514 /* Determine maximum buffer size. */
1524 1515 if (cmd->RequestLen < cmd->ResponseLen) {
1525 1516 pld_byte_cnt = cmd->ResponseLen;
1526 1517 } else {
1527 1518 pld_byte_cnt = cmd->RequestLen;
1528 1519 }
1529 1520
1530 1521 /* Allocate command block. */
1531 1522 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1532 1523 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1533 - if (pkt == NULL) {
1534 - EL(ha, "failed, kmem_zalloc\n");
1535 - cmd->Status = EXT_STATUS_NO_MEMORY;
1536 - cmd->ResponseLen = 0;
1537 - return;
1538 - }
1539 1524 pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1540 1525
1541 1526 /* Get command payload data. */
1542 1527 if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1543 1528 cmd->RequestLen, mode) != cmd->RequestLen) {
1544 1529 EL(ha, "failed, get_buffer_data\n");
1545 1530 kmem_free(pkt, pkt_size);
1546 1531 cmd->Status = EXT_STATUS_COPY_ERR;
1547 1532 cmd->ResponseLen = 0;
1548 1533 return;
1549 1534 }
1550 1535
1551 1536 /* Get DMA memory for the IOCB */
1552 1537 if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1553 1538 QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1554 1539 cmn_err(CE_WARN, "%s(%d): DMA memory "
1555 1540 "alloc failed", QL_NAME, ha->instance);
1556 1541 kmem_free(pkt, pkt_size);
1557 1542 kmem_free(dma_mem, sizeof (dma_mem_t));
1558 1543 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1559 1544 cmd->ResponseLen = 0;
1560 1545 return;
1561 1546 }
1562 1547
1563 1548 /* Copy out going payload data to IOCB DMA buffer. */
1564 1549 ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1565 1550 (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1566 1551
1567 1552 /* Sync IOCB DMA buffer. */
1568 1553 (void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1569 1554 DDI_DMA_SYNC_FORDEV);
1570 1555
1571 1556 /*
1572 1557 * Setup IOCB
1573 1558 */
1574 1559 ct = (ql_ct_iu_preamble_t *)pld;
1575 1560 if (CFG_IST(ha, CFG_CTRL_24258081)) {
1576 1561 pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1577 1562 pkt->ms24.entry_count = 1;
1578 1563
1579 1564 pkt->ms24.vp_index = ha->vp_index;
1580 1565
1581 1566 /* Set loop ID */
1582 1567 pkt->ms24.n_port_hdl = (uint16_t)
1583 1568 (ct->gs_type == GS_TYPE_DIR_SERVER ?
1584 1569 LE_16(SNS_24XX_HDL) :
1585 1570 LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1586 1571
1587 1572 /* Set ISP command timeout. */
1588 1573 pkt->ms24.timeout = LE_16(120);
1589 1574
1590 1575 /* Set cmd/response data segment counts. */
1591 1576 pkt->ms24.cmd_dseg_count = LE_16(1);
1592 1577 pkt->ms24.resp_dseg_count = LE_16(1);
1593 1578
1594 1579 /* Load ct cmd byte count. */
1595 1580 pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1596 1581
1597 1582 /* Load ct rsp byte count. */
1598 1583 pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1599 1584
1600 1585 long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1601 1586
1602 1587 /* Load MS command entry data segments. */
1603 1588 *long_ptr++ = (uint32_t)
1604 1589 LE_32(LSD(dma_mem->cookie.dmac_laddress));
1605 1590 *long_ptr++ = (uint32_t)
1606 1591 LE_32(MSD(dma_mem->cookie.dmac_laddress));
1607 1592 *long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1608 1593
1609 1594 /* Load MS response entry data segments. */
1610 1595 *long_ptr++ = (uint32_t)
1611 1596 LE_32(LSD(dma_mem->cookie.dmac_laddress));
1612 1597 *long_ptr++ = (uint32_t)
1613 1598 LE_32(MSD(dma_mem->cookie.dmac_laddress));
1614 1599 *long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1615 1600
1616 1601 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1617 1602 sizeof (ql_mbx_iocb_t));
1618 1603
1619 1604 comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1620 1605 if (comp_status == CS_DATA_UNDERRUN) {
1621 1606 if ((BE_16(ct->max_residual_size)) == 0) {
1622 1607 comp_status = CS_COMPLETE;
1623 1608 }
1624 1609 }
1625 1610
1626 1611 if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1627 1612 0) {
1628 1613 EL(ha, "failed, I/O timeout or "
1629 1614 "es=%xh, ss_l=%xh, rval=%xh\n",
1630 1615 pkt->sts24.entry_status,
1631 1616 pkt->sts24.scsi_status_l, rval);
1632 1617 kmem_free(pkt, pkt_size);
1633 1618 ql_free_dma_resource(ha, dma_mem);
1634 1619 kmem_free(dma_mem, sizeof (dma_mem_t));
1635 1620 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1636 1621 cmd->ResponseLen = 0;
1637 1622 return;
1638 1623 }
1639 1624 } else {
1640 1625 pkt->ms.entry_type = MS_TYPE;
1641 1626 pkt->ms.entry_count = 1;
1642 1627
1643 1628 /* Set loop ID */
1644 1629 loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1645 1630 SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1646 1631 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1647 1632 pkt->ms.loop_id_l = LSB(loop_id);
1648 1633 pkt->ms.loop_id_h = MSB(loop_id);
1649 1634 } else {
1650 1635 pkt->ms.loop_id_h = LSB(loop_id);
1651 1636 }
1652 1637
1653 1638 /* Set ISP command timeout. */
1654 1639 pkt->ms.timeout = LE_16(120);
1655 1640
1656 1641 /* Set data segment counts. */
1657 1642 pkt->ms.cmd_dseg_count_l = 1;
1658 1643 pkt->ms.total_dseg_count = LE_16(2);
1659 1644
1660 1645 /* Response total byte count. */
1661 1646 pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1662 1647 pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1663 1648
1664 1649 /* Command total byte count. */
1665 1650 pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1666 1651 pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1667 1652
1668 1653 /* Load command/response data segments. */
1669 1654 pkt->ms.dseg_0_address[0] = (uint32_t)
1670 1655 LE_32(LSD(dma_mem->cookie.dmac_laddress));
1671 1656 pkt->ms.dseg_0_address[1] = (uint32_t)
1672 1657 LE_32(MSD(dma_mem->cookie.dmac_laddress));
1673 1658 pkt->ms.dseg_1_address[0] = (uint32_t)
1674 1659 LE_32(LSD(dma_mem->cookie.dmac_laddress));
1675 1660 pkt->ms.dseg_1_address[1] = (uint32_t)
1676 1661 LE_32(MSD(dma_mem->cookie.dmac_laddress));
1677 1662
1678 1663 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1679 1664 sizeof (ql_mbx_iocb_t));
1680 1665
1681 1666 comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1682 1667 if (comp_status == CS_DATA_UNDERRUN) {
1683 1668 if ((BE_16(ct->max_residual_size)) == 0) {
1684 1669 comp_status = CS_COMPLETE;
1685 1670 }
1686 1671 }
1687 1672 if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1688 1673 EL(ha, "failed, I/O timeout or "
1689 1674 "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1690 1675 kmem_free(pkt, pkt_size);
1691 1676 ql_free_dma_resource(ha, dma_mem);
1692 1677 kmem_free(dma_mem, sizeof (dma_mem_t));
1693 1678 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1694 1679 cmd->ResponseLen = 0;
1695 1680 return;
1696 1681 }
1697 1682 }
1698 1683
1699 1684 /* Sync in coming DMA buffer. */
1700 1685 (void) ddi_dma_sync(dma_mem->dma_handle, 0,
1701 1686 pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1702 1687 /* Copy in coming DMA data. */
1703 1688 ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1704 1689 (uint8_t *)dma_mem->bp, pld_byte_cnt,
1705 1690 DDI_DEV_AUTOINCR);
1706 1691
1707 1692 /* Copy response payload from DMA buffer to application. */
1708 1693 if (cmd->ResponseLen != 0) {
1709 1694 QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1710 1695 cmd->ResponseLen);
1711 1696 QL_DUMP_9(pld, 8, cmd->ResponseLen);
1712 1697
1713 1698 /* Send response payload. */
1714 1699 if (ql_send_buffer_data(pld,
1715 1700 (caddr_t)(uintptr_t)cmd->ResponseAdr,
1716 1701 cmd->ResponseLen, mode) != cmd->ResponseLen) {
1717 1702 EL(ha, "failed, send_buffer_data\n");
1718 1703 cmd->Status = EXT_STATUS_COPY_ERR;
1719 1704 cmd->ResponseLen = 0;
1720 1705 }
1721 1706 }
1722 1707
1723 1708 kmem_free(pkt, pkt_size);
1724 1709 ql_free_dma_resource(ha, dma_mem);
1725 1710 kmem_free(dma_mem, sizeof (dma_mem_t));
1726 1711
1727 1712 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1728 1713 }
1729 1714
1730 1715 /*
1731 1716 * ql_aen_reg
1732 1717 * IOCTL management server Asynchronous Event Tracking Enable/Disable.
1733 1718 *
1734 1719 * Input:
1735 1720 * ha: adapter state pointer.
1736 1721 * cmd: EXT_IOCTL cmd struct pointer.
1737 1722 * mode: flags.
1738 1723 *
1739 1724 * Returns:
1740 1725 * None, request status indicated in cmd->Status.
1741 1726 *
1742 1727 * Context:
1743 1728 * Kernel context.
1744 1729 */
1745 1730 static void
1746 1731 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1747 1732 {
1748 1733 EXT_REG_AEN reg_struct;
1749 1734 int rval = 0;
1750 1735 ql_xioctl_t *xp = ha->xioctl;
1751 1736
1752 1737 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1753 1738
1754 1739 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, ®_struct,
1755 1740 cmd->RequestLen, mode);
1756 1741
1757 1742 if (rval == 0) {
1758 1743 if (reg_struct.Enable) {
1759 1744 xp->flags |= QL_AEN_TRACKING_ENABLE;
1760 1745 } else {
1761 1746 xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1762 1747 /* Empty the queue. */
1763 1748 INTR_LOCK(ha);
1764 1749 xp->aen_q_head = 0;
1765 1750 xp->aen_q_tail = 0;
1766 1751 INTR_UNLOCK(ha);
1767 1752 }
1768 1753 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1769 1754 } else {
1770 1755 cmd->Status = EXT_STATUS_COPY_ERR;
1771 1756 EL(ha, "failed, ddi_copyin\n");
1772 1757 }
1773 1758 }
1774 1759
1775 1760 /*
1776 1761 * ql_aen_get
1777 1762 * IOCTL management server Asynchronous Event Record Transfer.
1778 1763 *
1779 1764 * Input:
1780 1765 * ha: adapter state pointer.
1781 1766 * cmd: EXT_IOCTL cmd struct pointer.
1782 1767 * mode: flags.
1783 1768 *
1784 1769 * Returns:
1785 1770 * None, request status indicated in cmd->Status.
1786 1771 *
1787 1772 * Context:
1788 1773 * Kernel context.
1789 1774 */
static void
ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	out_size;
	EXT_ASYNC_EVENT	*tmp_q;
	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
	uint8_t		i;
	uint8_t		queue_cnt;
	uint8_t		request_cnt;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Compute the number of events that can be returned */
	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));

	/*
	 * The caller must provide room for the full queue; the whole
	 * queue is drained below, so a partial buffer is rejected.
	 */
	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
		    "Len=%xh\n", request_cnt);
		cmd->ResponseLen = 0;
		return;
	}

	/* 1st: Make a local copy of the entire queue content. */
	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
	queue_cnt = 0;

	/*
	 * The queue is shared with the interrupt path (ql_enqueue_aen),
	 * so the drain runs under INTR_LOCK.  Walk circularly from head
	 * to tail, compacting non-empty slots into aen[] and zeroing
	 * each slot's AsyncEventCode to mark it consumed.
	 */
	INTR_LOCK(ha);
	i = xp->aen_q_head;

	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
		if (tmp_q[i].AsyncEventCode != 0) {
			bcopy(&tmp_q[i], &aen[queue_cnt],
			    sizeof (EXT_ASYNC_EVENT));
			queue_cnt++;
			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
		}
		if (i == xp->aen_q_tail) {
			/* done. */
			break;
		}
		i++;
		if (i == EXT_DEF_MAX_AEN_QUEUE) {
			/* Wrap around to the start of the circular queue. */
			i = 0;
		}
	}

	/* Empty the queue. */
	xp->aen_q_head = 0;
	xp->aen_q_tail = 0;

	INTR_UNLOCK(ha);

	/* 2nd: Now transfer the queue content to user buffer */
	/* Copy the entire queue to user's buffer. */
	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
	if (queue_cnt == 0) {
		/* Nothing queued; success with an empty response. */
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&aen[0],
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    out_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = out_size;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1861 1846
1862 1847 /*
1863 1848 * ql_enqueue_aen
1864 1849 *
1865 1850 * Input:
1866 1851 * ha: adapter state pointer.
1867 1852 * event_code: async event code of the event to add to queue.
1868 1853 * payload: event payload for the queue.
1869 1854 * INTR_LOCK must be already obtained.
1870 1855 *
1871 1856 * Context:
1872 1857 * Interrupt or Kernel context, no mailbox commands allowed.
1873 1858 */
1874 1859 void
1875 1860 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1876 1861 {
1877 1862 uint8_t new_entry; /* index to current entry */
1878 1863 uint16_t *mbx;
1879 1864 EXT_ASYNC_EVENT *aen_queue;
1880 1865 ql_xioctl_t *xp = ha->xioctl;
1881 1866
1882 1867 QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1883 1868 event_code);
1884 1869
1885 1870 if (xp == NULL) {
1886 1871 QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1887 1872 return;
1888 1873 }
1889 1874 aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1890 1875
1891 1876 if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
1892 1877 /* Need to change queue pointers to make room. */
1893 1878
1894 1879 /* Increment tail for adding new entry. */
1895 1880 xp->aen_q_tail++;
1896 1881 if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1897 1882 xp->aen_q_tail = 0;
1898 1883 }
1899 1884 if (xp->aen_q_head == xp->aen_q_tail) {
1900 1885 /*
1901 1886 * We're overwriting the oldest entry, so need to
1902 1887 * update the head pointer.
1903 1888 */
1904 1889 xp->aen_q_head++;
1905 1890 if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1906 1891 xp->aen_q_head = 0;
1907 1892 }
1908 1893 }
1909 1894 }
1910 1895
1911 1896 new_entry = xp->aen_q_tail;
1912 1897 aen_queue[new_entry].AsyncEventCode = event_code;
1913 1898
1914 1899 /* Update payload */
1915 1900 if (payload != NULL) {
1916 1901 switch (event_code) {
1917 1902 case MBA_LIP_OCCURRED:
1918 1903 case MBA_LOOP_UP:
1919 1904 case MBA_LOOP_DOWN:
1920 1905 case MBA_LIP_F8:
1921 1906 case MBA_LIP_RESET:
1922 1907 case MBA_PORT_UPDATE:
1923 1908 break;
1924 1909 case MBA_RSCN_UPDATE:
1925 1910 mbx = (uint16_t *)payload;
1926 1911 /* al_pa */
1927 1912 aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1928 1913 LSB(mbx[2]);
1929 1914 /* area */
1930 1915 aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1931 1916 MSB(mbx[2]);
1932 1917 /* domain */
1933 1918 aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1934 1919 LSB(mbx[1]);
1935 1920 /* save in big endian */
1936 1921 BIG_ENDIAN_24(&aen_queue[new_entry].
1937 1922 Payload.RSCN.RSCNInfo[0]);
1938 1923
1939 1924 aen_queue[new_entry].Payload.RSCN.AddrFormat =
1940 1925 MSB(mbx[1]);
1941 1926
1942 1927 break;
1943 1928 default:
1944 1929 /* Not supported */
1945 1930 EL(ha, "failed, event code not supported=%xh\n",
1946 1931 event_code);
1947 1932 aen_queue[new_entry].AsyncEventCode = 0;
1948 1933 break;
1949 1934 }
1950 1935 }
1951 1936
1952 1937 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1953 1938 }
1954 1939
1955 1940 /*
1956 1941 * ql_scsi_passthru
1957 1942 * IOCTL SCSI passthrough.
1958 1943 *
1959 1944 * Input:
1960 1945 * ha: adapter state pointer.
1961 1946 * cmd: User space SCSI command pointer.
1962 1947 * mode: flags.
1963 1948 *
1964 1949 * Returns:
1965 1950 * None, request status indicated in cmd->Status.
1966 1951 *
1967 1952 * Context:
1968 1953 * Kernel context.
1969 1954 */
1970 1955 static void
1971 1956 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1972 1957 {
1973 1958 ql_mbx_iocb_t *pkt;
1974 1959 ql_mbx_data_t mr;
1975 1960 dma_mem_t *dma_mem;
1976 1961 caddr_t pld;
1977 1962 uint32_t pkt_size, pld_size;
1978 1963 uint16_t qlnt, retries, cnt, cnt2;
1979 1964 uint8_t *name;
1980 1965 EXT_FC_SCSI_PASSTHRU *ufc_req;
1981 1966 EXT_SCSI_PASSTHRU *usp_req;
1982 1967 int rval;
1983 1968 union _passthru {
1984 1969 EXT_SCSI_PASSTHRU sp_cmd;
1985 1970 EXT_FC_SCSI_PASSTHRU fc_cmd;
1986 1971 } pt_req; /* Passthru request */
1987 1972 uint32_t status, sense_sz = 0;
1988 1973 ql_tgt_t *tq = NULL;
1989 1974 EXT_SCSI_PASSTHRU *sp_req = &pt_req.sp_cmd;
1990 1975 EXT_FC_SCSI_PASSTHRU *fc_req = &pt_req.fc_cmd;
1991 1976
1992 1977 /* SCSI request struct for SCSI passthrough IOs. */
1993 1978 struct {
1994 1979 uint16_t lun;
1995 1980 uint16_t sense_length; /* Sense buffer size */
1996 1981 size_t resid; /* Residual */
1997 1982 uint8_t *cdbp; /* Requestor's CDB */
1998 1983 uint8_t *u_sense; /* Requestor's sense buffer */
1999 1984 uint8_t cdb_len; /* Requestor's CDB length */
2000 1985 uint8_t direction;
2001 1986 } scsi_req;
2002 1987
2003 1988 struct {
2004 1989 uint8_t *rsp_info;
2005 1990 uint8_t *req_sense_data;
2006 1991 uint32_t residual_length;
2007 1992 uint32_t rsp_info_length;
2008 1993 uint32_t req_sense_length;
2009 1994 uint16_t comp_status;
2010 1995 uint8_t state_flags_l;
2011 1996 uint8_t state_flags_h;
2012 1997 uint8_t scsi_status_l;
2013 1998 uint8_t scsi_status_h;
2014 1999 } sts;
2015 2000
2016 2001 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2017 2002
2018 2003 /* Verify Sub Code and set cnt to needed request size. */
2019 2004 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2020 2005 pld_size = sizeof (EXT_SCSI_PASSTHRU);
2021 2006 } else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2022 2007 pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2023 2008 } else {
2024 2009 EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2025 2010 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2026 2011 cmd->ResponseLen = 0;
2027 2012 return;
2028 2013 }
2029 2014
2030 2015 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2031 2016 if (dma_mem == NULL) {
2032 2017 EL(ha, "failed, kmem_zalloc\n");
2033 2018 cmd->Status = EXT_STATUS_NO_MEMORY;
2034 2019 cmd->ResponseLen = 0;
2035 2020 return;
2036 2021 }
2037 2022 /* Verify the size of and copy in the passthru request structure. */
2038 2023 if (cmd->RequestLen != pld_size) {
2039 2024 /* Return error */
2040 2025 EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2041 2026 cmd->RequestLen, pld_size);
2042 2027 cmd->Status = EXT_STATUS_INVALID_PARAM;
2043 2028 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2044 2029 cmd->ResponseLen = 0;
2045 2030 return;
2046 2031 }
2047 2032
2048 2033 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2049 2034 pld_size, mode) != 0) {
2050 2035 EL(ha, "failed, ddi_copyin\n");
2051 2036 cmd->Status = EXT_STATUS_COPY_ERR;
2052 2037 cmd->ResponseLen = 0;
2053 2038 return;
2054 2039 }
2055 2040
2056 2041 /*
2057 2042 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2058 2043 * request data structure.
2059 2044 */
2060 2045 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2061 2046 scsi_req.lun = sp_req->TargetAddr.Lun;
2062 2047 scsi_req.sense_length = sizeof (sp_req->SenseData);
2063 2048 scsi_req.cdbp = &sp_req->Cdb[0];
2064 2049 scsi_req.cdb_len = sp_req->CdbLength;
2065 2050 scsi_req.direction = sp_req->Direction;
2066 2051 usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2067 2052 scsi_req.u_sense = &usp_req->SenseData[0];
2068 2053 cmd->DetailStatus = EXT_DSTATUS_TARGET;
2069 2054
2070 2055 qlnt = QLNT_PORT;
2071 2056 name = (uint8_t *)&sp_req->TargetAddr.Target;
2072 2057 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2073 2058 ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2074 2059 tq = ql_find_port(ha, name, qlnt);
2075 2060 } else {
2076 2061 /*
2077 2062 * Must be FC PASSTHRU, verified above.
2078 2063 */
2079 2064 if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2080 2065 qlnt = QLNT_PORT;
2081 2066 name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2082 2067 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2083 2068 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2084 2069 ha->instance, cmd->SubCode, name[0], name[1],
2085 2070 name[2], name[3], name[4], name[5], name[6],
2086 2071 name[7]);
2087 2072 tq = ql_find_port(ha, name, qlnt);
2088 2073 } else if (fc_req->FCScsiAddr.DestType ==
2089 2074 EXT_DEF_DESTTYPE_WWNN) {
2090 2075 qlnt = QLNT_NODE;
2091 2076 name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2092 2077 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2093 2078 "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2094 2079 ha->instance, cmd->SubCode, name[0], name[1],
2095 2080 name[2], name[3], name[4], name[5], name[6],
2096 2081 name[7]);
2097 2082 tq = ql_find_port(ha, name, qlnt);
2098 2083 } else if (fc_req->FCScsiAddr.DestType ==
2099 2084 EXT_DEF_DESTTYPE_PORTID) {
2100 2085 qlnt = QLNT_PID;
2101 2086 name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2102 2087 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2103 2088 "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2104 2089 name[0], name[1], name[2]);
2105 2090 tq = ql_find_port(ha, name, qlnt);
2106 2091 } else {
2107 2092 EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2108 2093 cmd->SubCode, fc_req->FCScsiAddr.DestType);
2109 2094 cmd->Status = EXT_STATUS_INVALID_PARAM;
2110 2095 cmd->ResponseLen = 0;
2111 2096 return;
2112 2097 }
2113 2098 scsi_req.lun = fc_req->FCScsiAddr.Lun;
2114 2099 scsi_req.sense_length = sizeof (fc_req->SenseData);
2115 2100 scsi_req.cdbp = &sp_req->Cdb[0];
2116 2101 scsi_req.cdb_len = sp_req->CdbLength;
2117 2102 ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2118 2103 scsi_req.u_sense = &ufc_req->SenseData[0];
2119 2104 scsi_req.direction = fc_req->Direction;
2120 2105 }
2121 2106
2122 2107 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2123 2108 EL(ha, "failed, fc_port not found\n");
2124 2109 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2125 2110 cmd->ResponseLen = 0;
2126 2111 return;
2127 2112 }
2128 2113
2129 2114 if (tq->flags & TQF_NEED_AUTHENTICATION) {
2130 2115 EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2131 2116 cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2132 2117 cmd->ResponseLen = 0;
↓ open down ↓ |
584 lines elided |
↑ open up ↑ |
2133 2118 return;
2134 2119 }
2135 2120
2136 2121 /* Allocate command block. */
2137 2122 if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2138 2123 scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2139 2124 cmd->ResponseLen) {
2140 2125 pld_size = cmd->ResponseLen;
2141 2126 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2142 2127 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2143 - if (pkt == NULL) {
2144 - EL(ha, "failed, kmem_zalloc\n");
2145 - cmd->Status = EXT_STATUS_NO_MEMORY;
2146 - cmd->ResponseLen = 0;
2147 - return;
2148 - }
2149 2128 pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2150 2129
2151 2130 /* Get DMA memory for the IOCB */
2152 2131 if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2153 2132 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2154 2133 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2155 2134 "alloc failed", QL_NAME, ha->instance);
2156 2135 kmem_free(pkt, pkt_size);
2157 2136 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2158 2137 cmd->ResponseLen = 0;
2159 2138 return;
2160 2139 }
2161 2140
2162 2141 if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2163 2142 scsi_req.direction = (uint8_t)
2164 2143 (CFG_IST(ha, CFG_CTRL_24258081) ?
2165 2144 CF_RD : CF_DATA_IN | CF_STAG);
2166 2145 } else {
2167 2146 scsi_req.direction = (uint8_t)
2168 2147 (CFG_IST(ha, CFG_CTRL_24258081) ?
2169 2148 CF_WR : CF_DATA_OUT | CF_STAG);
2170 2149 cmd->ResponseLen = 0;
2171 2150
2172 2151 /* Get command payload. */
2173 2152 if (ql_get_buffer_data(
2174 2153 (caddr_t)(uintptr_t)cmd->ResponseAdr,
2175 2154 pld, pld_size, mode) != pld_size) {
2176 2155 EL(ha, "failed, get_buffer_data\n");
2177 2156 cmd->Status = EXT_STATUS_COPY_ERR;
2178 2157
2179 2158 kmem_free(pkt, pkt_size);
2180 2159 ql_free_dma_resource(ha, dma_mem);
2181 2160 kmem_free(dma_mem, sizeof (dma_mem_t));
2182 2161 return;
2183 2162 }
2184 2163
2185 2164 /* Copy out going data to DMA buffer. */
2186 2165 ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2187 2166 (uint8_t *)dma_mem->bp, pld_size,
2188 2167 DDI_DEV_AUTOINCR);
2189 2168
2190 2169 /* Sync DMA buffer. */
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
2191 2170 (void) ddi_dma_sync(dma_mem->dma_handle, 0,
2192 2171 dma_mem->size, DDI_DMA_SYNC_FORDEV);
2193 2172 }
2194 2173 } else {
2195 2174 scsi_req.direction = (uint8_t)
2196 2175 (CFG_IST(ha, CFG_CTRL_24258081) ? 0 : CF_STAG);
2197 2176 cmd->ResponseLen = 0;
2198 2177
2199 2178 pkt_size = sizeof (ql_mbx_iocb_t);
2200 2179 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2201 - if (pkt == NULL) {
2202 - EL(ha, "failed, kmem_zalloc-2\n");
2203 - cmd->Status = EXT_STATUS_NO_MEMORY;
2204 - return;
2205 - }
2206 2180 pld = NULL;
2207 2181 pld_size = 0;
2208 2182 }
2209 2183
2210 2184 /* retries = ha->port_down_retry_count; */
2211 2185 retries = 1;
2212 2186 cmd->Status = EXT_STATUS_OK;
2213 2187 cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2214 2188
2215 2189 QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2216 2190 QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2217 2191
2218 2192 do {
2219 2193 if (DRIVER_SUSPENDED(ha)) {
2220 2194 sts.comp_status = CS_LOOP_DOWN_ABORT;
2221 2195 break;
2222 2196 }
2223 2197
2224 2198 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2225 2199 pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2226 2200 pkt->cmd24.entry_count = 1;
2227 2201
2228 2202 /* Set LUN number */
2229 2203 pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2230 2204 pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2231 2205
2232 2206 /* Set N_port handle */
2233 2207 pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2234 2208
2235 2209 /* Set VP Index */
2236 2210 pkt->cmd24.vp_index = ha->vp_index;
2237 2211
2238 2212 /* Set target ID */
2239 2213 pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2240 2214 pkt->cmd24.target_id[1] = tq->d_id.b.area;
2241 2215 pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2242 2216
2243 2217 /* Set ISP command timeout. */
2244 2218 pkt->cmd24.timeout = (uint16_t)LE_16(15);
2245 2219
2246 2220 /* Load SCSI CDB */
2247 2221 ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2248 2222 pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2249 2223 DDI_DEV_AUTOINCR);
2250 2224 for (cnt = 0; cnt < MAX_CMDSZ;
2251 2225 cnt = (uint16_t)(cnt + 4)) {
2252 2226 ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2253 2227 + cnt, 4);
2254 2228 }
2255 2229
2256 2230 /* Set tag queue control flags */
2257 2231 pkt->cmd24.task = TA_STAG;
2258 2232
2259 2233 if (pld_size) {
2260 2234 /* Set transfer direction. */
2261 2235 pkt->cmd24.control_flags = scsi_req.direction;
2262 2236
2263 2237 /* Set data segment count. */
2264 2238 pkt->cmd24.dseg_count = LE_16(1);
2265 2239
2266 2240 /* Load total byte count. */
2267 2241 pkt->cmd24.total_byte_count = LE_32(pld_size);
2268 2242
2269 2243 /* Load data descriptor. */
2270 2244 pkt->cmd24.dseg_0_address[0] = (uint32_t)
2271 2245 LE_32(LSD(dma_mem->cookie.dmac_laddress));
2272 2246 pkt->cmd24.dseg_0_address[1] = (uint32_t)
2273 2247 LE_32(MSD(dma_mem->cookie.dmac_laddress));
2274 2248 pkt->cmd24.dseg_0_length = LE_32(pld_size);
2275 2249 }
2276 2250 } else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2277 2251 pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2278 2252 pkt->cmd3.entry_count = 1;
2279 2253 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2280 2254 pkt->cmd3.target_l = LSB(tq->loop_id);
2281 2255 pkt->cmd3.target_h = MSB(tq->loop_id);
2282 2256 } else {
2283 2257 pkt->cmd3.target_h = LSB(tq->loop_id);
2284 2258 }
2285 2259 pkt->cmd3.lun_l = LSB(scsi_req.lun);
2286 2260 pkt->cmd3.lun_h = MSB(scsi_req.lun);
2287 2261 pkt->cmd3.control_flags_l = scsi_req.direction;
2288 2262 pkt->cmd3.timeout = LE_16(15);
2289 2263 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2290 2264 pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2291 2265 }
2292 2266 if (pld_size) {
2293 2267 pkt->cmd3.dseg_count = LE_16(1);
2294 2268 pkt->cmd3.byte_count = LE_32(pld_size);
2295 2269 pkt->cmd3.dseg_0_address[0] = (uint32_t)
2296 2270 LE_32(LSD(dma_mem->cookie.dmac_laddress));
2297 2271 pkt->cmd3.dseg_0_address[1] = (uint32_t)
2298 2272 LE_32(MSD(dma_mem->cookie.dmac_laddress));
2299 2273 pkt->cmd3.dseg_0_length = LE_32(pld_size);
2300 2274 }
2301 2275 } else {
2302 2276 pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2303 2277 pkt->cmd.entry_count = 1;
2304 2278 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2305 2279 pkt->cmd.target_l = LSB(tq->loop_id);
2306 2280 pkt->cmd.target_h = MSB(tq->loop_id);
2307 2281 } else {
2308 2282 pkt->cmd.target_h = LSB(tq->loop_id);
2309 2283 }
2310 2284 pkt->cmd.lun_l = LSB(scsi_req.lun);
2311 2285 pkt->cmd.lun_h = MSB(scsi_req.lun);
2312 2286 pkt->cmd.control_flags_l = scsi_req.direction;
2313 2287 pkt->cmd.timeout = LE_16(15);
2314 2288 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2315 2289 pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2316 2290 }
2317 2291 if (pld_size) {
2318 2292 pkt->cmd.dseg_count = LE_16(1);
2319 2293 pkt->cmd.byte_count = LE_32(pld_size);
2320 2294 pkt->cmd.dseg_0_address = (uint32_t)
2321 2295 LE_32(LSD(dma_mem->cookie.dmac_laddress));
2322 2296 pkt->cmd.dseg_0_length = LE_32(pld_size);
2323 2297 }
2324 2298 }
2325 2299 /* Go issue command and wait for completion. */
2326 2300 QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2327 2301 QL_DUMP_9(pkt, 8, pkt_size);
2328 2302
2329 2303 status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2330 2304
2331 2305 if (pld_size) {
2332 2306 /* Sync in coming DMA buffer. */
2333 2307 (void) ddi_dma_sync(dma_mem->dma_handle, 0,
2334 2308 dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2335 2309 /* Copy in coming DMA data. */
2336 2310 ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2337 2311 (uint8_t *)dma_mem->bp, pld_size,
2338 2312 DDI_DEV_AUTOINCR);
2339 2313 }
2340 2314
2341 2315 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2342 2316 pkt->sts24.entry_status = (uint8_t)
2343 2317 (pkt->sts24.entry_status & 0x3c);
2344 2318 } else {
2345 2319 pkt->sts.entry_status = (uint8_t)
2346 2320 (pkt->sts.entry_status & 0x7e);
2347 2321 }
2348 2322
2349 2323 if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2350 2324 EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2351 2325 pkt->sts.entry_status, tq->d_id.b24);
2352 2326 status = QL_FUNCTION_PARAMETER_ERROR;
2353 2327 }
2354 2328
2355 2329 sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
2356 2330 LE_16(pkt->sts24.comp_status) :
2357 2331 LE_16(pkt->sts.comp_status));
2358 2332
2359 2333 /*
2360 2334 * We have verified about all the request that can be so far.
2361 2335 * Now we need to start verification of our ability to
2362 2336 * actually issue the CDB.
2363 2337 */
2364 2338 if (DRIVER_SUSPENDED(ha)) {
2365 2339 sts.comp_status = CS_LOOP_DOWN_ABORT;
2366 2340 break;
2367 2341 } else if (status == QL_SUCCESS &&
2368 2342 (sts.comp_status == CS_PORT_LOGGED_OUT ||
2369 2343 sts.comp_status == CS_PORT_UNAVAILABLE)) {
2370 2344 EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2371 2345 if (tq->flags & TQF_FABRIC_DEVICE) {
2372 2346 rval = ql_login_fport(ha, tq, tq->loop_id,
2373 2347 LFF_NO_PLOGI, &mr);
2374 2348 if (rval != QL_SUCCESS) {
2375 2349 EL(ha, "failed, login_fport=%xh, "
2376 2350 "d_id=%xh\n", rval, tq->d_id.b24);
2377 2351 }
2378 2352 } else {
2379 2353 rval = ql_login_lport(ha, tq, tq->loop_id,
2380 2354 LLF_NONE);
2381 2355 if (rval != QL_SUCCESS) {
2382 2356 EL(ha, "failed, login_lport=%xh, "
2383 2357 "d_id=%xh\n", rval, tq->d_id.b24);
2384 2358 }
2385 2359 }
2386 2360 } else {
2387 2361 break;
2388 2362 }
2389 2363
2390 2364 bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2391 2365
2392 2366 } while (retries--);
2393 2367
2394 2368 if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2395 2369 /* Cannot issue command now, maybe later */
2396 2370 EL(ha, "failed, suspended\n");
2397 2371 kmem_free(pkt, pkt_size);
2398 2372 ql_free_dma_resource(ha, dma_mem);
2399 2373 kmem_free(dma_mem, sizeof (dma_mem_t));
2400 2374 cmd->Status = EXT_STATUS_SUSPENDED;
2401 2375 cmd->ResponseLen = 0;
2402 2376 return;
2403 2377 }
2404 2378
2405 2379 if (status != QL_SUCCESS) {
2406 2380 /* Command error */
2407 2381 EL(ha, "failed, I/O\n");
2408 2382 kmem_free(pkt, pkt_size);
2409 2383 ql_free_dma_resource(ha, dma_mem);
2410 2384 kmem_free(dma_mem, sizeof (dma_mem_t));
2411 2385 cmd->Status = EXT_STATUS_ERR;
2412 2386 cmd->DetailStatus = status;
2413 2387 cmd->ResponseLen = 0;
2414 2388 return;
2415 2389 }
2416 2390
2417 2391 /* Setup status. */
2418 2392 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2419 2393 sts.scsi_status_l = pkt->sts24.scsi_status_l;
2420 2394 sts.scsi_status_h = pkt->sts24.scsi_status_h;
2421 2395
2422 2396 /* Setup residuals. */
2423 2397 sts.residual_length = LE_32(pkt->sts24.residual_length);
2424 2398
2425 2399 /* Setup state flags. */
2426 2400 sts.state_flags_l = pkt->sts24.state_flags_l;
2427 2401 sts.state_flags_h = pkt->sts24.state_flags_h;
2428 2402 if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2429 2403 sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2430 2404 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2431 2405 SF_XFERRED_DATA | SF_GOT_STATUS);
2432 2406 } else {
2433 2407 sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2434 2408 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2435 2409 SF_GOT_STATUS);
2436 2410 }
2437 2411 if (scsi_req.direction & CF_WR) {
2438 2412 sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2439 2413 SF_DATA_OUT);
2440 2414 } else if (scsi_req.direction & CF_RD) {
2441 2415 sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2442 2416 SF_DATA_IN);
2443 2417 }
2444 2418 sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2445 2419
2446 2420 /* Setup FCP response info. */
2447 2421 sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2448 2422 LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2449 2423 sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2450 2424 for (cnt = 0; cnt < sts.rsp_info_length;
2451 2425 cnt = (uint16_t)(cnt + 4)) {
2452 2426 ql_chg_endian(sts.rsp_info + cnt, 4);
2453 2427 }
2454 2428
2455 2429 /* Setup sense data. */
2456 2430 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2457 2431 sts.req_sense_length =
2458 2432 LE_32(pkt->sts24.fcp_sense_length);
2459 2433 sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2460 2434 SF_ARQ_DONE);
2461 2435 } else {
2462 2436 sts.req_sense_length = 0;
2463 2437 }
2464 2438 sts.req_sense_data =
2465 2439 &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2466 2440 cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2467 2441 (uintptr_t)sts.req_sense_data);
2468 2442 for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2469 2443 ql_chg_endian(sts.req_sense_data + cnt, 4);
2470 2444 }
2471 2445 } else {
2472 2446 sts.scsi_status_l = pkt->sts.scsi_status_l;
2473 2447 sts.scsi_status_h = pkt->sts.scsi_status_h;
2474 2448
2475 2449 /* Setup residuals. */
2476 2450 sts.residual_length = LE_32(pkt->sts.residual_length);
2477 2451
2478 2452 /* Setup state flags. */
2479 2453 sts.state_flags_l = pkt->sts.state_flags_l;
2480 2454 sts.state_flags_h = pkt->sts.state_flags_h;
2481 2455
2482 2456 /* Setup FCP response info. */
2483 2457 sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2484 2458 LE_16(pkt->sts.rsp_info_length) : 0;
2485 2459 sts.rsp_info = &pkt->sts.rsp_info[0];
2486 2460
2487 2461 /* Setup sense data. */
2488 2462 sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2489 2463 LE_16(pkt->sts.req_sense_length) : 0;
2490 2464 sts.req_sense_data = &pkt->sts.req_sense_data[0];
2491 2465 }
2492 2466
2493 2467 QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2494 2468 QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2495 2469
2496 2470 switch (sts.comp_status) {
2497 2471 case CS_INCOMPLETE:
2498 2472 case CS_ABORTED:
2499 2473 case CS_DEVICE_UNAVAILABLE:
2500 2474 case CS_PORT_UNAVAILABLE:
2501 2475 case CS_PORT_LOGGED_OUT:
2502 2476 case CS_PORT_CONFIG_CHG:
2503 2477 case CS_PORT_BUSY:
2504 2478 case CS_LOOP_DOWN_ABORT:
2505 2479 cmd->Status = EXT_STATUS_BUSY;
2506 2480 break;
2507 2481 case CS_RESET:
2508 2482 case CS_QUEUE_FULL:
2509 2483 cmd->Status = EXT_STATUS_ERR;
2510 2484 break;
2511 2485 case CS_TIMEOUT:
2512 2486 cmd->Status = EXT_STATUS_ERR;
2513 2487 break;
2514 2488 case CS_DATA_OVERRUN:
2515 2489 cmd->Status = EXT_STATUS_DATA_OVERRUN;
2516 2490 break;
2517 2491 case CS_DATA_UNDERRUN:
2518 2492 cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2519 2493 break;
2520 2494 }
2521 2495
2522 2496 /*
2523 2497 * If non data transfer commands fix tranfer counts.
2524 2498 */
2525 2499 if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2526 2500 scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2527 2501 scsi_req.cdbp[0] == SCMD_SEEK ||
2528 2502 scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2529 2503 scsi_req.cdbp[0] == SCMD_RESERVE ||
2530 2504 scsi_req.cdbp[0] == SCMD_RELEASE ||
2531 2505 scsi_req.cdbp[0] == SCMD_START_STOP ||
2532 2506 scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2533 2507 scsi_req.cdbp[0] == SCMD_VERIFY ||
2534 2508 scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2535 2509 scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2536 2510 scsi_req.cdbp[0] == SCMD_SPACE ||
2537 2511 scsi_req.cdbp[0] == SCMD_ERASE ||
2538 2512 (scsi_req.cdbp[0] == SCMD_FORMAT &&
2539 2513 (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2540 2514 /*
2541 2515 * Non data transfer command, clear sts_entry residual
2542 2516 * length.
2543 2517 */
2544 2518 sts.residual_length = 0;
2545 2519 cmd->ResponseLen = 0;
2546 2520 if (sts.comp_status == CS_DATA_UNDERRUN) {
2547 2521 sts.comp_status = CS_COMPLETE;
2548 2522 cmd->Status = EXT_STATUS_OK;
2549 2523 }
2550 2524 } else {
2551 2525 cmd->ResponseLen = pld_size;
2552 2526 }
2553 2527
2554 2528 /* Correct ISP completion status */
2555 2529 if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2556 2530 (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2557 2531 QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2558 2532 ha->instance);
2559 2533 scsi_req.resid = 0;
2560 2534 } else if (sts.comp_status == CS_DATA_UNDERRUN) {
2561 2535 QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2562 2536 ha->instance);
2563 2537 scsi_req.resid = sts.residual_length;
2564 2538 if (sts.scsi_status_h & FCP_RESID_UNDER) {
2565 2539 cmd->Status = (uint32_t)EXT_STATUS_OK;
2566 2540
2567 2541 cmd->ResponseLen = (uint32_t)
2568 2542 (pld_size - scsi_req.resid);
2569 2543 } else {
2570 2544 EL(ha, "failed, Transfer ERROR\n");
2571 2545 cmd->Status = EXT_STATUS_ERR;
2572 2546 cmd->ResponseLen = 0;
2573 2547 }
2574 2548 } else {
2575 2549 QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2576 2550 "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2577 2551 tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2578 2552 sts.scsi_status_l);
2579 2553
2580 2554 scsi_req.resid = pld_size;
2581 2555 /*
2582 2556 * Handle residual count on SCSI check
2583 2557 * condition.
2584 2558 *
2585 2559 * - If Residual Under / Over is set, use the
2586 2560 * Residual Transfer Length field in IOCB.
2587 2561 * - If Residual Under / Over is not set, and
2588 2562 * Transferred Data bit is set in State Flags
2589 2563 * field of IOCB, report residual value of 0
2590 2564 * (you may want to do this for tape
2591 2565 * Write-type commands only). This takes care
2592 2566 * of logical end of tape problem and does
2593 2567 * not break Unit Attention.
2594 2568 * - If Residual Under / Over is not set, and
2595 2569 * Transferred Data bit is not set in State
2596 2570 * Flags, report residual value equal to
2597 2571 * original data transfer length.
2598 2572 */
2599 2573 if (sts.scsi_status_l & STATUS_CHECK) {
2600 2574 cmd->Status = EXT_STATUS_SCSI_STATUS;
2601 2575 cmd->DetailStatus = sts.scsi_status_l;
2602 2576 if (sts.scsi_status_h &
2603 2577 (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2604 2578 scsi_req.resid = sts.residual_length;
2605 2579 } else if (sts.state_flags_h &
2606 2580 STATE_XFERRED_DATA) {
2607 2581 scsi_req.resid = 0;
2608 2582 }
2609 2583 }
2610 2584 }
2611 2585
2612 2586 if (sts.scsi_status_l & STATUS_CHECK &&
2613 2587 sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2614 2588 sts.req_sense_length) {
2615 2589 /*
2616 2590 * Check condition with vaild sense data flag set and sense
2617 2591 * length != 0
2618 2592 */
2619 2593 if (sts.req_sense_length > scsi_req.sense_length) {
2620 2594 sense_sz = scsi_req.sense_length;
2621 2595 } else {
2622 2596 sense_sz = sts.req_sense_length;
2623 2597 }
2624 2598
2625 2599 EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2626 2600 tq->d_id.b24);
2627 2601 QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2628 2602
2629 2603 if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2630 2604 (size_t)sense_sz, mode) != 0) {
2631 2605 EL(ha, "failed, request sense ddi_copyout\n");
2632 2606 }
2633 2607
2634 2608 cmd->Status = EXT_STATUS_SCSI_STATUS;
2635 2609 cmd->DetailStatus = sts.scsi_status_l;
2636 2610 }
2637 2611
2638 2612 /* Copy response payload from DMA buffer to application. */
2639 2613 if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2640 2614 cmd->ResponseLen != 0) {
2641 2615 QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2642 2616 "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2643 2617 scsi_req.resid, pld_size, cmd->ResponseLen);
2644 2618 QL_DUMP_9(pld, 8, cmd->ResponseLen);
2645 2619
2646 2620 /* Send response payload. */
2647 2621 if (ql_send_buffer_data(pld,
2648 2622 (caddr_t)(uintptr_t)cmd->ResponseAdr,
2649 2623 cmd->ResponseLen, mode) != cmd->ResponseLen) {
2650 2624 EL(ha, "failed, send_buffer_data\n");
2651 2625 cmd->Status = EXT_STATUS_COPY_ERR;
2652 2626 cmd->ResponseLen = 0;
2653 2627 }
2654 2628 }
2655 2629
2656 2630 if (cmd->Status != EXT_STATUS_OK) {
2657 2631 EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2658 2632 "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2659 2633 } else {
2660 2634 /*EMPTY*/
2661 2635 QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2662 2636 ha->instance, cmd->ResponseLen);
2663 2637 }
2664 2638
2665 2639 kmem_free(pkt, pkt_size);
2666 2640 ql_free_dma_resource(ha, dma_mem);
2667 2641 kmem_free(dma_mem, sizeof (dma_mem_t));
2668 2642 }
2669 2643
2670 2644 /*
2671 2645 * ql_wwpn_to_scsiaddr
2672 2646 *
2673 2647 * Input:
2674 2648 * ha: adapter state pointer.
2675 2649 * cmd: EXT_IOCTL cmd struct pointer.
2676 2650 * mode: flags.
2677 2651 *
2678 2652 * Context:
2679 2653 * Kernel context.
2680 2654 */
2681 2655 static void
2682 2656 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2683 2657 {
2684 2658 int status;
2685 2659 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE];
2686 2660 EXT_SCSI_ADDR *tmp_addr;
2687 2661 ql_tgt_t *tq;
2688 2662
2689 2663 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2690 2664
2691 2665 if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2692 2666 /* Return error */
2693 2667 EL(ha, "incorrect RequestLen\n");
2694 2668 cmd->Status = EXT_STATUS_INVALID_PARAM;
2695 2669 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2696 2670 return;
2697 2671 }
2698 2672
2699 2673 status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2700 2674 cmd->RequestLen, mode);
2701 2675
2702 2676 if (status != 0) {
2703 2677 cmd->Status = EXT_STATUS_COPY_ERR;
2704 2678 EL(ha, "failed, ddi_copyin\n");
2705 2679 return;
2706 2680 }
2707 2681
2708 2682 tq = ql_find_port(ha, wwpn, QLNT_PORT);
2709 2683
2710 2684 if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2711 2685 /* no matching device */
2712 2686 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2713 2687 EL(ha, "failed, device not found\n");
2714 2688 return;
2715 2689 }
2716 2690
2717 2691 /* Copy out the IDs found. For now we can only return target ID. */
2718 2692 tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2719 2693
2720 2694 status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2721 2695
2722 2696 if (status != 0) {
2723 2697 cmd->Status = EXT_STATUS_COPY_ERR;
2724 2698 EL(ha, "failed, ddi_copyout\n");
2725 2699 } else {
2726 2700 cmd->Status = EXT_STATUS_OK;
2727 2701 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2728 2702 }
2729 2703 }
2730 2704
2731 2705 /*
2732 2706 * ql_host_idx
2733 2707 * Gets host order index.
2734 2708 *
2735 2709 * Input:
2736 2710 * ha: adapter state pointer.
2737 2711 * cmd: EXT_IOCTL cmd struct pointer.
2738 2712 * mode: flags.
2739 2713 *
2740 2714 * Returns:
2741 2715 * None, request status indicated in cmd->Status.
2742 2716 *
2743 2717 * Context:
2744 2718 * Kernel context.
2745 2719 */
2746 2720 static void
2747 2721 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2748 2722 {
2749 2723 uint16_t idx;
2750 2724
2751 2725 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2752 2726
2753 2727 if (cmd->ResponseLen < sizeof (uint16_t)) {
2754 2728 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2755 2729 cmd->DetailStatus = sizeof (uint16_t);
2756 2730 EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2757 2731 cmd->ResponseLen = 0;
2758 2732 return;
2759 2733 }
2760 2734
2761 2735 idx = (uint16_t)ha->instance;
2762 2736
2763 2737 if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2764 2738 sizeof (uint16_t), mode) != 0) {
2765 2739 cmd->Status = EXT_STATUS_COPY_ERR;
2766 2740 cmd->ResponseLen = 0;
2767 2741 EL(ha, "failed, ddi_copyout\n");
2768 2742 } else {
2769 2743 cmd->ResponseLen = sizeof (uint16_t);
2770 2744 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2771 2745 }
2772 2746 }
2773 2747
2774 2748 /*
2775 2749 * ql_host_drvname
2776 2750 * Gets host driver name
2777 2751 *
2778 2752 * Input:
2779 2753 * ha: adapter state pointer.
2780 2754 * cmd: EXT_IOCTL cmd struct pointer.
2781 2755 * mode: flags.
2782 2756 *
2783 2757 * Returns:
2784 2758 * None, request status indicated in cmd->Status.
2785 2759 *
2786 2760 * Context:
2787 2761 * Kernel context.
2788 2762 */
2789 2763 static void
2790 2764 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2791 2765 {
2792 2766
2793 2767 char drvname[] = QL_NAME;
2794 2768 uint32_t qlnamelen;
2795 2769
2796 2770 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2797 2771
2798 2772 qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2799 2773
2800 2774 if (cmd->ResponseLen < qlnamelen) {
2801 2775 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2802 2776 cmd->DetailStatus = qlnamelen;
2803 2777 EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2804 2778 cmd->ResponseLen, qlnamelen);
2805 2779 cmd->ResponseLen = 0;
2806 2780 return;
2807 2781 }
2808 2782
2809 2783 if (ddi_copyout((void *)&drvname,
2810 2784 (void *)(uintptr_t)(cmd->ResponseAdr),
2811 2785 qlnamelen, mode) != 0) {
2812 2786 cmd->Status = EXT_STATUS_COPY_ERR;
2813 2787 cmd->ResponseLen = 0;
2814 2788 EL(ha, "failed, ddi_copyout\n");
2815 2789 } else {
2816 2790 cmd->ResponseLen = qlnamelen-1;
2817 2791 }
2818 2792
2819 2793 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2820 2794 }
2821 2795
2822 2796 /*
2823 2797 * ql_read_nvram
2824 2798 * Get NVRAM contents.
2825 2799 *
2826 2800 * Input:
2827 2801 * ha: adapter state pointer.
2828 2802 * cmd: EXT_IOCTL cmd struct pointer.
2829 2803 * mode: flags.
2830 2804 *
2831 2805 * Returns:
2832 2806 * None, request status indicated in cmd->Status.
2833 2807 *
2834 2808 * Context:
2835 2809 * Kernel context.
2836 2810 */
2837 2811 static void
2838 2812 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2839 2813 {
2840 2814
2841 2815 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2842 2816
2843 2817 if (cmd->ResponseLen < ha->nvram_cache->size) {
2844 2818 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2845 2819 cmd->DetailStatus = ha->nvram_cache->size;
2846 2820 EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2847 2821 cmd->ResponseLen);
2848 2822 cmd->ResponseLen = 0;
2849 2823 return;
2850 2824 }
2851 2825
2852 2826 /* Get NVRAM data. */
2853 2827 if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2854 2828 mode) != 0) {
2855 2829 cmd->Status = EXT_STATUS_COPY_ERR;
2856 2830 cmd->ResponseLen = 0;
2857 2831 EL(ha, "failed, copy error\n");
2858 2832 } else {
2859 2833 cmd->ResponseLen = ha->nvram_cache->size;
2860 2834 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2861 2835 }
2862 2836 }
2863 2837
2864 2838 /*
2865 2839 * ql_write_nvram
2866 2840 * Loads NVRAM contents.
2867 2841 *
2868 2842 * Input:
2869 2843 * ha: adapter state pointer.
2870 2844 * cmd: EXT_IOCTL cmd struct pointer.
2871 2845 * mode: flags.
2872 2846 *
2873 2847 * Returns:
2874 2848 * None, request status indicated in cmd->Status.
2875 2849 *
2876 2850 * Context:
2877 2851 * Kernel context.
2878 2852 */
2879 2853 static void
2880 2854 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2881 2855 {
2882 2856
2883 2857 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2884 2858
2885 2859 if (cmd->RequestLen < ha->nvram_cache->size) {
2886 2860 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2887 2861 cmd->DetailStatus = ha->nvram_cache->size;
2888 2862 EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2889 2863 cmd->RequestLen);
2890 2864 return;
2891 2865 }
2892 2866
2893 2867 /* Load NVRAM data. */
2894 2868 if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2895 2869 mode) != 0) {
2896 2870 cmd->Status = EXT_STATUS_COPY_ERR;
2897 2871 EL(ha, "failed, copy error\n");
2898 2872 } else {
2899 2873 /*EMPTY*/
2900 2874 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2901 2875 }
2902 2876 }
2903 2877
2904 2878 /*
2905 2879 * ql_write_vpd
2906 2880 * Loads VPD contents.
2907 2881 *
2908 2882 * Input:
2909 2883 * ha: adapter state pointer.
2910 2884 * cmd: EXT_IOCTL cmd struct pointer.
2911 2885 * mode: flags.
2912 2886 *
2913 2887 * Returns:
2914 2888 * None, request status indicated in cmd->Status.
2915 2889 *
2916 2890 * Context:
2917 2891 * Kernel context.
2918 2892 */
2919 2893 static void
2920 2894 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2921 2895 {
2922 2896 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2923 2897
2924 2898 int32_t rval = 0;
2925 2899
2926 2900 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2927 2901 cmd->Status = EXT_STATUS_INVALID_REQUEST;
2928 2902 EL(ha, "failed, invalid request for HBA\n");
2929 2903 return;
2930 2904 }
2931 2905
2932 2906 if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2933 2907 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2934 2908 cmd->DetailStatus = QL_24XX_VPD_SIZE;
2935 2909 EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2936 2910 cmd->RequestLen);
2937 2911 return;
2938 2912 }
2939 2913
2940 2914 /* Load VPD data. */
2941 2915 if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2942 2916 mode)) != 0) {
2943 2917 cmd->Status = EXT_STATUS_COPY_ERR;
2944 2918 cmd->DetailStatus = rval;
2945 2919 EL(ha, "failed, errno=%x\n", rval);
2946 2920 } else {
2947 2921 /*EMPTY*/
2948 2922 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2949 2923 }
2950 2924 }
2951 2925
2952 2926 /*
2953 2927 * ql_read_vpd
2954 2928 * Dumps VPD contents.
2955 2929 *
2956 2930 * Input:
2957 2931 * ha: adapter state pointer.
2958 2932 * cmd: EXT_IOCTL cmd struct pointer.
2959 2933 * mode: flags.
2960 2934 *
2961 2935 * Returns:
2962 2936 * None, request status indicated in cmd->Status.
2963 2937 *
2964 2938 * Context:
2965 2939 * Kernel context.
2966 2940 */
2967 2941 static void
2968 2942 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2969 2943 {
2970 2944 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2971 2945
2972 2946 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2973 2947 cmd->Status = EXT_STATUS_INVALID_REQUEST;
2974 2948 EL(ha, "failed, invalid request for HBA\n");
2975 2949 return;
2976 2950 }
2977 2951
2978 2952 if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2979 2953 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2980 2954 cmd->DetailStatus = QL_24XX_VPD_SIZE;
2981 2955 EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2982 2956 cmd->ResponseLen);
2983 2957 return;
2984 2958 }
2985 2959
2986 2960 /* Dump VPD data. */
2987 2961 if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2988 2962 mode)) != 0) {
2989 2963 cmd->Status = EXT_STATUS_COPY_ERR;
2990 2964 EL(ha, "failed,\n");
2991 2965 } else {
2992 2966 /*EMPTY*/
2993 2967 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2994 2968 }
2995 2969 }
2996 2970
/*
 * ql_get_fcache
 *	Dumps flash cache contents.
 *
 *	For backwards compatibility the response layout is fixed: one
 *	100-byte slot per BIOS/FCODE/EFI image starting at offset 0, and
 *	the firmware image (PCI header stripped) at offset 300.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status; DetailStatus holds
 *	the number of bytes actually copied out on success.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	bsize, boff, types, cpsize, hsize;
	ql_fcache_t	*fptr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Hold the cache lock across the whole walk of ha->fcache. */
	CACHE_LOCK(ha);

	if (ha->fcache == NULL) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_ERR;
		EL(ha, "failed, adapter fcache not setup\n");
		return;
	}

	/* Minimum response size: 100 bytes (legacy) or 400 (24xx+ layout). */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		bsize = 100;
	} else {
		bsize = 400;
	}

	if (cmd->ResponseLen < bsize) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = bsize;
		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
		    bsize, cmd->ResponseLen);
		return;
	}

	boff = 0;
	bsize = 0;		/* reused below as "bytes copied" counter */
	fptr = ha->fcache;

	/*
	 * For backwards compatibility, get one of each image type
	 */
	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
		/* Get the next image */
		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

			/* At most 100 bytes of each image are returned. */
			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

			if (ddi_copyout(fptr->buf,
			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
			    cpsize, mode) != 0) {
				CACHE_UNLOCK(ha);
				EL(ha, "ddicopy failed, done\n");
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->DetailStatus = 0;
				return;
			}
			/*
			 * NOTE(review): boff always advances by the full
			 * 100-byte slot while bsize counts actual bytes
			 * (cpsize) — presumably intentional for the fixed
			 * legacy layout; confirm against consumers.
			 */
			boff += 100;
			bsize += cpsize;
			/* Only one image per type: drop this type's bit. */
			types &= ~(fptr->type);
		}
	}

	/*
	 * Get the firmware image -- it needs to be last in the
	 * buffer at offset 300 for backwards compatibility. Also for
	 * backwards compatibility, the pci header is stripped off.
	 */
	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
		if (hsize > fptr->buflen) {
			CACHE_UNLOCK(ha);
			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
			    hsize, fptr->buflen);
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}

		/* Copy at most 100 bytes of fw, after the PCI header. */
		cpsize = ((fptr->buflen - hsize) < 100 ?
		    fptr->buflen - hsize : 100);

		if (ddi_copyout(fptr->buf+hsize,
		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
		    cpsize, mode) != 0) {
			CACHE_UNLOCK(ha);
			EL(ha, "fw ddicopy failed, done\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}
		/*
		 * NOTE(review): the fw section adds the full slot size
		 * (100) rather than cpsize, unlike the loop above —
		 * verify whether DetailStatus is meant to be slots or
		 * bytes.
		 */
		bsize += 100;
	}

	CACHE_UNLOCK(ha);
	cmd->Status = EXT_STATUS_OK;
	cmd->DetailStatus = bsize;

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
3111 3085
3112 3086 /*
3113 3087 * ql_get_fcache_ex
3114 3088 * Dumps flash cache contents.
3115 3089 *
3116 3090 * Input:
3117 3091 * ha: adapter state pointer.
3118 3092 * cmd: EXT_IOCTL cmd struct pointer.
3119 3093 * mode: flags.
3120 3094 *
3121 3095 * Returns:
3122 3096 * None, request status indicated in cmd->Status.
3123 3097 *
3124 3098 * Context:
3125 3099 * Kernel context.
3126 3100 */
3127 3101 static void
3128 3102 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3129 3103 {
3130 3104 uint32_t bsize = 0;
3131 3105 uint32_t boff = 0;
3132 3106 ql_fcache_t *fptr;
3133 3107
3134 3108 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3135 3109
3136 3110 CACHE_LOCK(ha);
3137 3111 if (ha->fcache == NULL) {
3138 3112 CACHE_UNLOCK(ha);
3139 3113 cmd->Status = EXT_STATUS_ERR;
3140 3114 EL(ha, "failed, adapter fcache not setup\n");
3141 3115 return;
3142 3116 }
3143 3117
3144 3118 /* Make sure user passed enough buffer space */
3145 3119 for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3146 3120 bsize += FBUFSIZE;
3147 3121 }
3148 3122
3149 3123 if (cmd->ResponseLen < bsize) {
3150 3124 CACHE_UNLOCK(ha);
3151 3125 if (cmd->ResponseLen != 0) {
3152 3126 EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3153 3127 bsize, cmd->ResponseLen);
3154 3128 }
3155 3129 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3156 3130 cmd->DetailStatus = bsize;
3157 3131 return;
3158 3132 }
3159 3133
3160 3134 boff = 0;
3161 3135 fptr = ha->fcache;
3162 3136 while ((fptr != NULL) && (fptr->buf != NULL)) {
3163 3137 /* Get the next image */
3164 3138 if (ddi_copyout(fptr->buf,
3165 3139 (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3166 3140 (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3167 3141 mode) != 0) {
3168 3142 CACHE_UNLOCK(ha);
3169 3143 EL(ha, "failed, ddicopy at %xh, done\n", boff);
3170 3144 cmd->Status = EXT_STATUS_COPY_ERR;
3171 3145 cmd->DetailStatus = 0;
3172 3146 return;
3173 3147 }
3174 3148 boff += FBUFSIZE;
3175 3149 fptr = fptr->next;
3176 3150 }
3177 3151
3178 3152 CACHE_UNLOCK(ha);
3179 3153 cmd->Status = EXT_STATUS_OK;
3180 3154 cmd->DetailStatus = bsize;
3181 3155
3182 3156 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3183 3157 }
3184 3158
/*
 * ql_read_flash
 *	Get flash contents.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Quiesce I/O before touching the flash. */
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->DetailStatus = xp->fdesc.flash_size;
		cmd->ResponseLen = 0;
		return;
	}

	if (ql_setup_fcache(ha) != QL_SUCCESS) {
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = xp->fdesc.flash_size;
		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
		    cmd->ResponseLen, xp->fdesc.flash_size);
		cmd->ResponseLen = 0;
	} else {
		/* adjust read size to flash size */
		if (cmd->ResponseLen > xp->fdesc.flash_size) {
			EL(ha, "adjusting req=%xh, max=%xh\n",
			    cmd->ResponseLen, xp->fdesc.flash_size);
			cmd->ResponseLen = xp->fdesc.flash_size;
		}

		/* Get flash data. */
		if (ql_flash_fcode_dump(ha,
		    (void *)(uintptr_t)(cmd->ResponseAdr),
		    (size_t)(cmd->ResponseLen), 0, mode) != 0) {
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
			EL(ha, "failed,\n");
		}
	}

	/* Resume I/O */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		ql_restart_driver(ha);
	} else {
		/* Legacy parts need a full ISP abort to resume cleanly. */
		EL(ha, "isp_abort_needed for restart\n");
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
		    DRIVER_STALL);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
3250 3224
/*
 * ql_write_flash
 *	Loads flash contents.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Quiesce I/O before touching the flash. */
	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->DetailStatus = xp->fdesc.flash_size;
		cmd->ResponseLen = 0;
		return;
	}

	if (ql_setup_fcache(ha) != QL_SUCCESS) {
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = xp->fdesc.flash_size;
		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
		    cmd->RequestLen, xp->fdesc.flash_size);
		cmd->ResponseLen = 0;
	} else {
		/* Load flash data; reject images larger than the flash. */
		if (cmd->RequestLen > xp->fdesc.flash_size) {
			cmd->Status = EXT_STATUS_ERR;
			cmd->DetailStatus = xp->fdesc.flash_size;
			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
			    cmd->RequestLen, xp->fdesc.flash_size);
		} else if (ql_flash_fcode_load(ha,
		    (void *)(uintptr_t)(cmd->RequestAdr),
		    (size_t)(cmd->RequestLen), mode) != 0) {
			cmd->Status = EXT_STATUS_COPY_ERR;
			EL(ha, "failed,\n");
		}
	}

	/* Resume I/O */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		ql_restart_driver(ha);
	} else {
		/* Legacy parts need a full ISP abort to resume cleanly. */
		EL(ha, "isp_abort_needed for restart\n");
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
		    DRIVER_STALL);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
3313 3287
/*
 * ql_diagnostic_loopback
 *	Performs EXT_CC_LOOPBACK Command
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_LOOPBACK_REQ	plbreq;
	EXT_LOOPBACK_RSP	plbrsp;
	ql_mbx_data_t		mr;
	uint32_t		rval;
	caddr_t			bp;
	uint16_t		opt;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Get loop back request. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	opt = (uint16_t)(plbreq.Options & MBC_LOOPBACK_POINT_MASK);

	/*
	 * Check transfer length fits in buffer.
	 *
	 * NOTE(review): the `&&` combined with `<` means a TransferCount
	 * at or above MAILBOX_BUFFER_SIZE bypasses this rejection even
	 * when it exceeds BufferLength, and a TransferCount of 0 reaches
	 * kmem_zalloc below — presumably guarded elsewhere; confirm.
	 */
	if (plbreq.BufferLength < plbreq.TransferCount &&
	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate command memory (KM_SLEEP cannot fail). */
	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);

	/* Get loopback data. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyin-2\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Loopback requires a quiet, stable link; stall normal I/O. */
	if ((ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) ||
	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* determine topology so we can send the loopback or the echo */
	/* Echo is supported on 2300's only and above */

	if (CFG_IST(ha, CFG_CTRL_8081)) {
		if (!(ha->task_daemon_flags & LOOP_DOWN) && opt ==
		    MBC_LOOPBACK_POINT_EXTERNAL) {
			/* ELS echo payloads are limited to 252 bytes. */
			if (plbreq.TransferCount > 252) {
				EL(ha, "transfer count (%d) > 252\n",
				    plbreq.TransferCount);
				kmem_free(bp, plbreq.TransferCount);
				cmd->Status = EXT_STATUS_INVALID_PARAM;
				cmd->ResponseLen = 0;
				return;
			}
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
			    MBC_ECHO_ELS, &mr);
		} else {
			/* 81xx: program the loop point, test, then undo. */
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				(void) ql_set_loop_point(ha, opt);
			}
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				(void) ql_set_loop_point(ha, 0);
			}
		}
	} else {
		if (!(ha->task_daemon_flags & LOOP_DOWN) &&
		    (ha->topology & QL_F_PORT) &&
		    ha->device_id >= 0x2300) {
			QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using "
			    "echo\n", ha->instance);
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
			    (uint16_t)(CFG_IST(ha, CFG_CTRL_8081) ?
			    MBC_ECHO_ELS : MBC_ECHO_64BIT), &mr);
		} else {
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
		}
	}

	/* Resume normal I/O before reporting results. */
	ql_restart_driver(ha);

	/* Restart IP if it was shutdown. */
	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
		(void) ql_initialize_ip(ha);
		ql_isp_rcvbuf(ha);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
		return;
	}

	/* Return loopback data. */
	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyout\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	kmem_free(bp, plbreq.TransferCount);

	/* Return loopback results. */
	plbrsp.BufferAddress = plbreq.BufferAddress;
	plbrsp.BufferLength = plbreq.TransferCount;
	plbrsp.CompletionStatus = mr.mb[0];

	/* Echo has no CRC/disparity counters; only loopback reports them. */
	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
		plbrsp.CrcErrorCount = 0;
		plbrsp.DisparityErrorCount = 0;
		plbrsp.FrameLengthErrorCount = 0;
		plbrsp.IterationCountLastError = 0;
	} else {
		plbrsp.CrcErrorCount = mr.mb[1];
		plbrsp.DisparityErrorCount = mr.mb[2];
		plbrsp.FrameLengthErrorCount = mr.mb[3];
		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
	}

	rval = ddi_copyout((void *)&plbrsp,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_LOOPBACK_RSP), mode);
	if (rval != 0) {
		EL(ha, "failed, ddi_copyout-2\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
3499 3467
/*
 * ql_set_loop_point
 *	Setup loop point for port configuration.
 *
 * Input:
 *	ha:	adapter state structure.
 *	opt:	loop point option.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_set_loop_point(ql_adapter_state_t *ha, uint16_t opt)
{
	ql_mbx_data_t	mr;
	int		rval;
	uint32_t	timer;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * We get the current port config, modify the loopback field and
	 * write it back out.
	 */
	if ((rval = ql_get_port_config(ha, &mr)) != QL_SUCCESS) {
		EL(ha, "get_port_config status=%xh\n", rval);
		return (rval);
	}
	/*
	 * Set the loopback mode field while maintaining the others.
	 * Currently only internal or none are supported.
	 */
	mr.mb[1] = (uint16_t)(mr.mb[1] &~LOOPBACK_MODE_FIELD_MASK);
	if (opt == MBC_LOOPBACK_POINT_INTERNAL) {
		mr.mb[1] = (uint16_t)(mr.mb[1] |
		    LOOPBACK_MODE(LOOPBACK_MODE_INTERNAL));
	}
	/*
	 * Changing the port configuration will cause the port state to cycle
	 * down and back up. The indication that this has happened is that
	 * the point to point flag gets set.
	 */
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~POINT_TO_POINT;
	ADAPTER_STATE_UNLOCK(ha);
	if ((rval = ql_set_port_config(ha, &mr)) != QL_SUCCESS) {
		/* Log only; still wait below for the port to settle. */
		EL(ha, "set_port_config status=%xh\n", rval);
	}

	/*
	 * wait for a while -- up to 10 seconds when setting a loop point
	 * (opt != 0), not at all when clearing it.
	 */
	for (timer = opt ? 10 : 0; timer; timer--) {
		/* Port has cycled back up once POINT_TO_POINT reappears. */
		if (ha->flags & POINT_TO_POINT) {
			break;
		}
		/* Delay for 1000000 usec (1 second). */
		ql_delay(ha, 1000000);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
3565 3533
3566 3534 /*
3567 3535 * ql_send_els_rnid
3568 3536 * IOCTL for extended link service RNID command.
3569 3537 *
3570 3538 * Input:
3571 3539 * ha: adapter state pointer.
3572 3540 * cmd: User space CT arguments pointer.
3573 3541 * mode: flags.
3574 3542 *
3575 3543 * Returns:
3576 3544 * None, request status indicated in cmd->Status.
3577 3545 *
3578 3546 * Context:
3579 3547 * Kernel context.
3580 3548 */
3581 3549 static void
3582 3550 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3583 3551 {
3584 3552 EXT_RNID_REQ tmp_rnid;
3585 3553 port_id_t tmp_fcid;
3586 3554 caddr_t tmp_buf, bptr;
3587 3555 uint32_t copy_len;
3588 3556 ql_tgt_t *tq;
3589 3557 EXT_RNID_DATA rnid_data;
3590 3558 uint32_t loop_ready_wait = 10 * 60 * 10;
3591 3559 int rval = 0;
3592 3560 uint32_t local_hba = 0;
3593 3561
3594 3562 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3595 3563
3596 3564 if (DRIVER_SUSPENDED(ha)) {
3597 3565 EL(ha, "failed, LOOP_NOT_READY\n");
3598 3566 cmd->Status = EXT_STATUS_BUSY;
3599 3567 cmd->ResponseLen = 0;
3600 3568 return;
3601 3569 }
3602 3570
3603 3571 if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3604 3572 /* parameter error */
3605 3573 EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
3606 3574 cmd->RequestLen);
3607 3575 cmd->Status = EXT_STATUS_INVALID_PARAM;
3608 3576 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3609 3577 cmd->ResponseLen = 0;
3610 3578 return;
3611 3579 }
3612 3580
3613 3581 if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
3614 3582 &tmp_rnid, cmd->RequestLen, mode) != 0) {
3615 3583 EL(ha, "failed, ddi_copyin\n");
3616 3584 cmd->Status = EXT_STATUS_COPY_ERR;
3617 3585 cmd->ResponseLen = 0;
3618 3586 return;
3619 3587 }
3620 3588
3621 3589 /* Find loop ID of the device */
3622 3590 if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3623 3591 bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
3624 3592 (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
3625 3593 (caddr_t)&ha->init_ctrl_blk.cb.node_name;
3626 3594 if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3627 3595 EXT_DEF_WWN_NAME_SIZE) == 0) {
3628 3596 local_hba = 1;
3629 3597 } else {
3630 3598 tq = ql_find_port(ha,
3631 3599 (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3632 3600 }
3633 3601 } else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3634 3602 bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
3635 3603 (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
3636 3604 (caddr_t)&ha->init_ctrl_blk.cb.port_name;
3637 3605 if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3638 3606 EXT_DEF_WWN_NAME_SIZE) == 0) {
3639 3607 local_hba = 1;
3640 3608 } else {
3641 3609 tq = ql_find_port(ha,
3642 3610 (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3643 3611 }
3644 3612 } else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3645 3613 /*
3646 3614 * Copy caller's d_id to tmp space.
3647 3615 */
3648 3616 bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3649 3617 EXT_DEF_PORTID_SIZE_ACTUAL);
3650 3618 BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3651 3619
3652 3620 if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
↓ open down ↓ |
272 lines elided |
↑ open up ↑ |
3653 3621 EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3654 3622 local_hba = 1;
3655 3623 } else {
3656 3624 tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3657 3625 QLNT_PID);
3658 3626 }
3659 3627 }
3660 3628
3661 3629 /* Allocate memory for command. */
3662 3630 tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3663 - if (tmp_buf == NULL) {
3664 - EL(ha, "failed, kmem_zalloc\n");
3665 - cmd->Status = EXT_STATUS_NO_MEMORY;
3666 - cmd->ResponseLen = 0;
3667 - return;
3668 - }
3669 3631
3670 3632 if (local_hba) {
3671 3633 rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3672 3634 if (rval != QL_SUCCESS) {
3673 3635 EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3674 3636 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3675 3637 cmd->Status = EXT_STATUS_ERR;
3676 3638 cmd->ResponseLen = 0;
3677 3639 return;
3678 3640 }
3679 3641
3680 3642 /* Save gotten RNID data. */
3681 3643 bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3682 3644
3683 3645 /* Now build the Send RNID response */
3684 3646 tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3685 3647 tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3686 3648 tmp_buf[2] = 0;
3687 3649 tmp_buf[3] = sizeof (EXT_RNID_DATA);
3688 3650
3689 3651 if (CFG_IST(ha, CFG_CTRL_24258081)) {
3690 3652 bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
3691 3653 EXT_DEF_WWN_NAME_SIZE);
3692 3654 bcopy(ha->init_ctrl_blk.cb24.node_name,
3693 3655 &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3694 3656 EXT_DEF_WWN_NAME_SIZE);
3695 3657 } else {
3696 3658 bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
3697 3659 EXT_DEF_WWN_NAME_SIZE);
3698 3660 bcopy(ha->init_ctrl_blk.cb.node_name,
3699 3661 &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3700 3662 EXT_DEF_WWN_NAME_SIZE);
3701 3663 }
3702 3664
3703 3665 bcopy((uint8_t *)&rnid_data,
3704 3666 &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3705 3667 sizeof (EXT_RNID_DATA));
3706 3668 } else {
3707 3669 if (tq == NULL) {
3708 3670 /* no matching device */
3709 3671 EL(ha, "failed, device not found\n");
3710 3672 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3711 3673 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3712 3674 cmd->DetailStatus = EXT_DSTATUS_TARGET;
3713 3675 cmd->ResponseLen = 0;
3714 3676 return;
3715 3677 }
3716 3678
3717 3679 /* Send command */
3718 3680 rval = ql_send_rnid_els(ha, tq->loop_id,
3719 3681 (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3720 3682 if (rval != QL_SUCCESS) {
3721 3683 EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3722 3684 rval, tq->loop_id);
3723 3685 while (LOOP_NOT_READY(ha)) {
3724 3686 ql_delay(ha, 100000);
3725 3687 if (loop_ready_wait-- == 0) {
3726 3688 EL(ha, "failed, loop not ready\n");
3727 3689 cmd->Status = EXT_STATUS_ERR;
3728 3690 cmd->ResponseLen = 0;
3729 3691 }
3730 3692 }
3731 3693 rval = ql_send_rnid_els(ha, tq->loop_id,
3732 3694 (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3733 3695 tmp_buf);
3734 3696 if (rval != QL_SUCCESS) {
3735 3697 /* error */
3736 3698 EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3737 3699 rval, tq->loop_id);
3738 3700 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3739 3701 cmd->Status = EXT_STATUS_ERR;
3740 3702 cmd->ResponseLen = 0;
3741 3703 return;
3742 3704 }
3743 3705 }
3744 3706 }
3745 3707
3746 3708 /* Copy the response */
3747 3709 copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3748 3710 SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3749 3711
3750 3712 if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3751 3713 copy_len, mode) != copy_len) {
3752 3714 cmd->Status = EXT_STATUS_COPY_ERR;
3753 3715 EL(ha, "failed, ddi_copyout\n");
3754 3716 } else {
3755 3717 cmd->ResponseLen = copy_len;
3756 3718 if (copy_len < SEND_RNID_RSP_SIZE) {
3757 3719 cmd->Status = EXT_STATUS_DATA_OVERRUN;
3758 3720 EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3759 3721
3760 3722 } else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3761 3723 cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3762 3724 EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3763 3725 } else {
3764 3726 cmd->Status = EXT_STATUS_OK;
3765 3727 QL_PRINT_9(CE_CONT, "(%d): done\n",
3766 3728 ha->instance);
3767 3729 }
3768 3730 }
3769 3731
3770 3732 kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3771 3733 }
3772 3734
3773 3735 /*
3774 3736 * ql_set_host_data
3775 3737 * Process IOCTL subcommand to set host/adapter related data.
3776 3738 *
3777 3739 * Input:
3778 3740 * ha: adapter state pointer.
3779 3741 * cmd: User space CT arguments pointer.
3780 3742 * mode: flags.
3781 3743 *
3782 3744 * Returns:
3783 3745 * None, request status indicated in cmd->Status.
3784 3746 *
3785 3747 * Context:
3786 3748 * Kernel context.
3787 3749 */
3788 3750 static void
3789 3751 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3790 3752 {
3791 3753 QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3792 3754 cmd->SubCode);
3793 3755
3794 3756 /*
3795 3757 * case off on command subcode
3796 3758 */
3797 3759 switch (cmd->SubCode) {
3798 3760 case EXT_SC_SET_RNID:
3799 3761 ql_set_rnid_parameters(ha, cmd, mode);
3800 3762 break;
3801 3763 case EXT_SC_RST_STATISTICS:
3802 3764 (void) ql_reset_statistics(ha, cmd);
3803 3765 break;
3804 3766 case EXT_SC_SET_BEACON_STATE:
3805 3767 ql_set_led_state(ha, cmd, mode);
3806 3768 break;
3807 3769 case EXT_SC_SET_PARMS:
3808 3770 case EXT_SC_SET_BUS_MODE:
3809 3771 case EXT_SC_SET_DR_DUMP_BUF:
3810 3772 case EXT_SC_SET_RISC_CODE:
3811 3773 case EXT_SC_SET_FLASH_RAM:
3812 3774 case EXT_SC_SET_LUN_BITMASK:
3813 3775 case EXT_SC_SET_RETRY_CNT:
3814 3776 case EXT_SC_SET_RTIN:
3815 3777 case EXT_SC_SET_FC_LUN_BITMASK:
3816 3778 case EXT_SC_ADD_TARGET_DEVICE:
3817 3779 case EXT_SC_SWAP_TARGET_DEVICE:
3818 3780 case EXT_SC_SET_SEL_TIMEOUT:
3819 3781 default:
3820 3782 /* function not supported. */
3821 3783 EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3822 3784 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3823 3785 break;
3824 3786 }
3825 3787
3826 3788 if (cmd->Status != EXT_STATUS_OK) {
3827 3789 EL(ha, "failed, Status=%d\n", cmd->Status);
3828 3790 } else {
3829 3791 /*EMPTY*/
3830 3792 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3831 3793 }
3832 3794 }
3833 3795
3834 3796 /*
3835 3797 * ql_get_host_data
3836 3798 * Performs EXT_CC_GET_DATA subcommands.
3837 3799 *
3838 3800 * Input:
3839 3801 * ha: adapter state pointer.
3840 3802 * cmd: Local EXT_IOCTL cmd struct pointer.
3841 3803 * mode: flags.
3842 3804 *
3843 3805 * Returns:
3844 3806 * None, request status indicated in cmd->Status.
3845 3807 *
3846 3808 * Context:
3847 3809 * Kernel context.
3848 3810 */
3849 3811 static void
3850 3812 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3851 3813 {
3852 3814 int out_size = 0;
3853 3815
3854 3816 QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3855 3817 cmd->SubCode);
3856 3818
3857 3819 /* case off on command subcode */
3858 3820 switch (cmd->SubCode) {
3859 3821 case EXT_SC_GET_STATISTICS:
3860 3822 out_size = sizeof (EXT_HBA_PORT_STAT);
3861 3823 break;
3862 3824 case EXT_SC_GET_FC_STATISTICS:
3863 3825 out_size = sizeof (EXT_HBA_PORT_STAT);
3864 3826 break;
3865 3827 case EXT_SC_GET_PORT_SUMMARY:
3866 3828 out_size = sizeof (EXT_DEVICEDATA);
3867 3829 break;
3868 3830 case EXT_SC_GET_RNID:
3869 3831 out_size = sizeof (EXT_RNID_DATA);
3870 3832 break;
3871 3833 case EXT_SC_GET_TARGET_ID:
3872 3834 out_size = sizeof (EXT_DEST_ADDR);
3873 3835 break;
3874 3836 case EXT_SC_GET_BEACON_STATE:
3875 3837 out_size = sizeof (EXT_BEACON_CONTROL);
3876 3838 break;
3877 3839 case EXT_SC_GET_FC4_STATISTICS:
3878 3840 out_size = sizeof (EXT_HBA_FC4STATISTICS);
3879 3841 break;
3880 3842 case EXT_SC_GET_DCBX_PARAM:
3881 3843 out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE;
3882 3844 break;
3883 3845 case EXT_SC_GET_RESOURCE_CNTS:
3884 3846 out_size = sizeof (EXT_RESOURCE_CNTS);
3885 3847 break;
3886 3848 case EXT_SC_GET_FCF_LIST:
3887 3849 out_size = sizeof (EXT_FCF_LIST);
3888 3850 break;
3889 3851 case EXT_SC_GET_SCSI_ADDR:
3890 3852 case EXT_SC_GET_ERR_DETECTIONS:
3891 3853 case EXT_SC_GET_BUS_MODE:
3892 3854 case EXT_SC_GET_DR_DUMP_BUF:
3893 3855 case EXT_SC_GET_RISC_CODE:
3894 3856 case EXT_SC_GET_FLASH_RAM:
3895 3857 case EXT_SC_GET_LINK_STATUS:
3896 3858 case EXT_SC_GET_LOOP_ID:
3897 3859 case EXT_SC_GET_LUN_BITMASK:
3898 3860 case EXT_SC_GET_PORT_DATABASE:
3899 3861 case EXT_SC_GET_PORT_DATABASE_MEM:
3900 3862 case EXT_SC_GET_POSITION_MAP:
3901 3863 case EXT_SC_GET_RETRY_CNT:
3902 3864 case EXT_SC_GET_RTIN:
3903 3865 case EXT_SC_GET_FC_LUN_BITMASK:
3904 3866 case EXT_SC_GET_SEL_TIMEOUT:
3905 3867 default:
3906 3868 /* function not supported. */
3907 3869 EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3908 3870 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3909 3871 cmd->ResponseLen = 0;
3910 3872 return;
3911 3873 }
3912 3874
3913 3875 if (cmd->ResponseLen < out_size) {
3914 3876 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3915 3877 cmd->DetailStatus = out_size;
3916 3878 EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
3917 3879 cmd->ResponseLen, out_size);
3918 3880 cmd->ResponseLen = 0;
3919 3881 return;
3920 3882 }
3921 3883
3922 3884 switch (cmd->SubCode) {
3923 3885 case EXT_SC_GET_RNID:
3924 3886 ql_get_rnid_parameters(ha, cmd, mode);
3925 3887 break;
3926 3888 case EXT_SC_GET_STATISTICS:
3927 3889 ql_get_statistics(ha, cmd, mode);
3928 3890 break;
3929 3891 case EXT_SC_GET_FC_STATISTICS:
3930 3892 ql_get_statistics_fc(ha, cmd, mode);
3931 3893 break;
3932 3894 case EXT_SC_GET_FC4_STATISTICS:
3933 3895 ql_get_statistics_fc4(ha, cmd, mode);
3934 3896 break;
3935 3897 case EXT_SC_GET_PORT_SUMMARY:
3936 3898 ql_get_port_summary(ha, cmd, mode);
3937 3899 break;
3938 3900 case EXT_SC_GET_TARGET_ID:
3939 3901 ql_get_target_id(ha, cmd, mode);
3940 3902 break;
3941 3903 case EXT_SC_GET_BEACON_STATE:
3942 3904 ql_get_led_state(ha, cmd, mode);
3943 3905 break;
3944 3906 case EXT_SC_GET_DCBX_PARAM:
3945 3907 ql_get_dcbx_parameters(ha, cmd, mode);
3946 3908 break;
3947 3909 case EXT_SC_GET_FCF_LIST:
3948 3910 ql_get_fcf_list(ha, cmd, mode);
3949 3911 break;
3950 3912 case EXT_SC_GET_RESOURCE_CNTS:
3951 3913 ql_get_resource_counts(ha, cmd, mode);
3952 3914 break;
3953 3915 }
3954 3916
3955 3917 if (cmd->Status != EXT_STATUS_OK) {
3956 3918 EL(ha, "failed, Status=%d\n", cmd->Status);
3957 3919 } else {
3958 3920 /*EMPTY*/
3959 3921 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3960 3922 }
3961 3923 }
3962 3924
3963 3925 /* ******************************************************************** */
3964 3926 /* Helper Functions */
3965 3927 /* ******************************************************************** */
3966 3928
3967 3929 /*
3968 3930 * ql_lun_count
3969 3931 * Get numbers of LUNS on target.
3970 3932 *
3971 3933 * Input:
3972 3934 * ha: adapter state pointer.
3973 3935 * q: device queue pointer.
3974 3936 *
3975 3937 * Returns:
3976 3938 * Number of LUNs.
3977 3939 *
3978 3940 * Context:
3979 3941 * Kernel context.
3980 3942 */
3981 3943 static int
3982 3944 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3983 3945 {
3984 3946 int cnt;
3985 3947
3986 3948 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3987 3949
3988 3950 /* Bypass LUNs that failed. */
3989 3951 cnt = ql_report_lun(ha, tq);
3990 3952 if (cnt == 0) {
3991 3953 cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3992 3954 }
3993 3955
3994 3956 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3995 3957
3996 3958 return (cnt);
3997 3959 }
3998 3960
3999 3961 /*
4000 3962 * ql_report_lun
4001 3963 * Get numbers of LUNS using report LUN command.
4002 3964 *
4003 3965 * Input:
4004 3966 * ha: adapter state pointer.
4005 3967 * q: target queue pointer.
4006 3968 *
4007 3969 * Returns:
4008 3970 * Number of LUNs.
4009 3971 *
4010 3972 * Context:
4011 3973 * Kernel context.
4012 3974 */
static int
ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int			rval;
	uint8_t			retries;
	ql_mbx_iocb_t		*pkt;
	ql_rpt_lun_lst_t	*rpt;
	dma_mem_t		dma_mem;
	uint32_t		pkt_size, cnt;
	uint16_t		comp_status;
	uint8_t			scsi_status_h, scsi_status_l, *reqs;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Don't touch the hardware while the driver is suspended. */
	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		return (0);
	}

	/*
	 * One allocation holds both the IOCB and the REPORT LUNS response;
	 * rpt aliases the tail of the same buffer.  KM_SLEEP cannot fail.
	 */
	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		kmem_free(pkt, pkt_size);
		return (0);
	}

	/*
	 * Build and issue a SCMD_REPORT_LUNS IOCB, retrying up to 4 times
	 * on retryable failures.  Three IOCB layouts are used depending on
	 * the controller generation / addressing mode.
	 */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* 24xx/25xx/81xx: command type 7 IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set Virtual Port ID */
			pkt->cmd24.vp_index = ha->vp_index;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB: allocation length in bytes 6-9. */
			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd24.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			/* Firmware expects the CDB byte-swapped per word. */
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.total_byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd24.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* Legacy ISP with 64-bit DMA: command type 3 IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			pkt->cmd3.entry_count = 1;
			/* Extended firmware carries a 16-bit loop ID. */
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd3.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else {
			/* Legacy ISP, 32-bit DMA: command type 2 IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		}

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Extract completion/SCSI status from the layout in use. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* Mask to the meaningful entry-status bits. */
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}

		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
		    scsi_status_l & STATUS_CHECK) {
			/* Device underrun, treat as OK. */
			if (rval == QL_SUCCESS &&
			    comp_status == CS_DATA_UNDERRUN &&
			    scsi_status_h & FCP_RESID_UNDER) {
				break;
			}

			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
			    comp_status, scsi_status_h, scsi_status_l);

			if (rval == QL_SUCCESS) {
				/* Port-gone/timeout errors are not retried. */
				if ((comp_status == CS_TIMEOUT) ||
				    (comp_status == CS_PORT_UNAVAILABLE) ||
				    (comp_status == CS_PORT_LOGGED_OUT)) {
					rval = QL_FUNCTION_TIMEOUT;
					break;
				}
				rval = QL_FUNCTION_FAILED;
			} else if (rval == QL_ABORTED) {
				break;
			}

			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			/* Success: stop retrying. */
			break;
		}
		/* Clear the IOCB/response buffer before the next attempt. */
		bzero((caddr_t)pkt, pkt_size);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
		rval = 0;
	} else {
		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
		/*
		 * NOTE(review): the dump uses rpt->hdr.len without BE_32
		 * while the count below byte-swaps it — confirm the debug
		 * dump length is intentionally raw.
		 */
		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
		/* hdr.len is big-endian byte count; 8 bytes per LUN entry. */
		rval = (int)(BE_32(rpt->hdr.len) / 8);
	}

	kmem_free(pkt, pkt_size);
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
4250 4208
4251 4209 /*
4252 4210 * ql_inq_scan
4253 4211 * Get numbers of LUNS using inquiry command.
4254 4212 *
4255 4213 * Input:
4256 4214 * ha: adapter state pointer.
4257 4215 * tq: target queue pointer.
4258 4216 * count: scan for the number of existing LUNs.
4259 4217 *
4260 4218 * Returns:
4261 4219 * Number of LUNs.
4262 4220 *
4263 4221 * Context:
4264 4222 * Kernel context.
4265 4223 */
static int
ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
{
	int		lun, cnt, rval;
	ql_mbx_iocb_t	*pkt;
	uint8_t		*inq;
	uint32_t	pkt_size;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * One allocation holds the IOCB plus the INQUIRY response; inq
	 * aliases the response portion.  KM_SLEEP cannot fail.
	 */
	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));

	/* Probe LUNs in order, counting those with a usable device type. */
	cnt = 0;
	for (lun = 0; lun < MAX_LUNS; lun++) {

		if (DRIVER_SUSPENDED(ha)) {
			/* NOTE(review): rval is not read after this point. */
			rval = QL_LOOP_DOWN;
			cnt = 0;
			break;
		}

		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
		if (rval == QL_SUCCESS) {
			/* inq[0] is the SCSI peripheral device type. */
			switch (*inq) {
			case DTYPE_DIRECT:
			case DTYPE_PROCESSOR:	/* Appliance. */
			case DTYPE_WORM:
			case DTYPE_RODIRECT:
			case DTYPE_SCANNER:
			case DTYPE_OPTICAL:
			case DTYPE_CHANGER:
			case DTYPE_ESI:
				cnt++;
				break;
			case DTYPE_SEQUENTIAL:
				/* Tape: remember it on the target queue. */
				cnt++;
				tq->flags |= TQF_TAPE_DEVICE;
				break;
			default:
				/* Unrecognized type: skip, keep scanning. */
				QL_PRINT_9(CE_CONT, "(%d): failed, "
				    "unsupported device id=%xh, lun=%d, "
				    "type=%xh\n", ha->instance, tq->loop_id,
				    lun, *inq);
				break;
			}

			/* Stop at an enclosure device or the caller's cap. */
			if (*inq == DTYPE_ESI || cnt >= count) {
				break;
			}
		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
			/* Fatal transport error: abandon the scan. */
			cnt = 0;
			break;
		}
	}

	kmem_free(pkt, pkt_size);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (cnt);
}
4333 4287
4334 4288 /*
4335 4289 * ql_inq
4336 4290 * Issue inquiry command.
4337 4291 *
4338 4292 * Input:
4339 4293 * ha: adapter state pointer.
4340 4294 * tq: target queue pointer.
4341 4295 * lun: LUN number.
4342 4296 * pkt: command and buffer pointer.
4343 4297 * inq_len: amount of inquiry data.
4344 4298 *
4345 4299 * Returns:
4346 4300 * ql local function return status code.
4347 4301 *
4348 4302 * Context:
4349 4303 * Kernel context.
4350 4304 */
4351 4305 static int
4352 4306 ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
4353 4307 uint8_t inq_len)
4354 4308 {
4355 4309 dma_mem_t dma_mem;
4356 4310 int rval, retries;
4357 4311 uint32_t pkt_size, cnt;
4358 4312 uint16_t comp_status;
4359 4313 uint8_t scsi_status_h, scsi_status_l, *reqs;
4360 4314 caddr_t inq_data;
4361 4315
4362 4316 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4363 4317
4364 4318 if (DRIVER_SUSPENDED(ha)) {
4365 4319 EL(ha, "failed, loop down\n");
4366 4320 return (QL_FUNCTION_TIMEOUT);
4367 4321 }
4368 4322
4369 4323 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
4370 4324 bzero((caddr_t)pkt, pkt_size);
4371 4325
4372 4326 inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
4373 4327
4374 4328 /* Get DMA memory for the IOCB */
4375 4329 if (ql_get_dma_mem(ha, &dma_mem, inq_len,
4376 4330 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4377 4331 cmn_err(CE_WARN, "%s(%d): DMA memory "
4378 4332 "alloc failed", QL_NAME, ha->instance);
4379 4333 return (0);
4380 4334 }
4381 4335
4382 4336 for (retries = 0; retries < 4; retries++) {
4383 4337 if (CFG_IST(ha, CFG_CTRL_24258081)) {
4384 4338 pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4385 4339 pkt->cmd24.entry_count = 1;
4386 4340
4387 4341 /* Set LUN number */
4388 4342 pkt->cmd24.fcp_lun[2] = LSB(lun);
4389 4343 pkt->cmd24.fcp_lun[3] = MSB(lun);
4390 4344
4391 4345 /* Set N_port handle */
4392 4346 pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4393 4347
4394 4348 /* Set target ID */
4395 4349 pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4396 4350 pkt->cmd24.target_id[1] = tq->d_id.b.area;
4397 4351 pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4398 4352
4399 4353 /* Set Virtual Port ID */
4400 4354 pkt->cmd24.vp_index = ha->vp_index;
4401 4355
4402 4356 /* Set ISP command timeout. */
4403 4357 pkt->cmd24.timeout = LE_16(15);
4404 4358
4405 4359 /* Load SCSI CDB */
4406 4360 pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
4407 4361 pkt->cmd24.scsi_cdb[4] = inq_len;
4408 4362 for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4409 4363 ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4410 4364 + cnt, 4);
4411 4365 }
4412 4366
4413 4367 /* Set tag queue control flags */
4414 4368 pkt->cmd24.task = TA_STAG;
4415 4369
4416 4370 /* Set transfer direction. */
4417 4371 pkt->cmd24.control_flags = CF_RD;
4418 4372
4419 4373 /* Set data segment count. */
4420 4374 pkt->cmd24.dseg_count = LE_16(1);
4421 4375
4422 4376 /* Load total byte count. */
4423 4377 pkt->cmd24.total_byte_count = LE_32(inq_len);
4424 4378
4425 4379 /* Load data descriptor. */
4426 4380 pkt->cmd24.dseg_0_address[0] = (uint32_t)
4427 4381 LE_32(LSD(dma_mem.cookie.dmac_laddress));
4428 4382 pkt->cmd24.dseg_0_address[1] = (uint32_t)
4429 4383 LE_32(MSD(dma_mem.cookie.dmac_laddress));
4430 4384 pkt->cmd24.dseg_0_length = LE_32(inq_len);
4431 4385 } else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4432 4386 pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4433 4387 cnt = CMD_TYPE_3_DATA_SEGMENTS;
4434 4388
4435 4389 pkt->cmd3.entry_count = 1;
4436 4390 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4437 4391 pkt->cmd3.target_l = LSB(tq->loop_id);
4438 4392 pkt->cmd3.target_h = MSB(tq->loop_id);
4439 4393 } else {
4440 4394 pkt->cmd3.target_h = LSB(tq->loop_id);
4441 4395 }
4442 4396 pkt->cmd3.lun_l = LSB(lun);
4443 4397 pkt->cmd3.lun_h = MSB(lun);
4444 4398 pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4445 4399 pkt->cmd3.timeout = LE_16(15);
4446 4400 pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
4447 4401 pkt->cmd3.scsi_cdb[4] = inq_len;
4448 4402 pkt->cmd3.dseg_count = LE_16(1);
4449 4403 pkt->cmd3.byte_count = LE_32(inq_len);
4450 4404 pkt->cmd3.dseg_0_address[0] = (uint32_t)
4451 4405 LE_32(LSD(dma_mem.cookie.dmac_laddress));
4452 4406 pkt->cmd3.dseg_0_address[1] = (uint32_t)
4453 4407 LE_32(MSD(dma_mem.cookie.dmac_laddress));
4454 4408 pkt->cmd3.dseg_0_length = LE_32(inq_len);
4455 4409 } else {
4456 4410 pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4457 4411 cnt = CMD_TYPE_2_DATA_SEGMENTS;
4458 4412
4459 4413 pkt->cmd.entry_count = 1;
4460 4414 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4461 4415 pkt->cmd.target_l = LSB(tq->loop_id);
4462 4416 pkt->cmd.target_h = MSB(tq->loop_id);
4463 4417 } else {
4464 4418 pkt->cmd.target_h = LSB(tq->loop_id);
4465 4419 }
4466 4420 pkt->cmd.lun_l = LSB(lun);
4467 4421 pkt->cmd.lun_h = MSB(lun);
4468 4422 pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4469 4423 pkt->cmd.timeout = LE_16(15);
4470 4424 pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
4471 4425 pkt->cmd.scsi_cdb[4] = inq_len;
4472 4426 pkt->cmd.dseg_count = LE_16(1);
4473 4427 pkt->cmd.byte_count = LE_32(inq_len);
4474 4428 pkt->cmd.dseg_0_address = (uint32_t)
4475 4429 LE_32(LSD(dma_mem.cookie.dmac_laddress));
4476 4430 pkt->cmd.dseg_0_length = LE_32(inq_len);
4477 4431 }
4478 4432
4479 4433 /* rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
4480 4434 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4481 4435 sizeof (ql_mbx_iocb_t));
4482 4436
4483 4437 /* Sync in coming IOCB DMA buffer. */
4484 4438 (void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4485 4439 DDI_DMA_SYNC_FORKERNEL);
4486 4440 /* Copy in coming DMA data. */
4487 4441 ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
4488 4442 (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4489 4443
4490 4444 if (CFG_IST(ha, CFG_CTRL_24258081)) {
4491 4445 pkt->sts24.entry_status = (uint8_t)
4492 4446 (pkt->sts24.entry_status & 0x3c);
4493 4447 comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4494 4448 scsi_status_h = pkt->sts24.scsi_status_h;
4495 4449 scsi_status_l = pkt->sts24.scsi_status_l;
4496 4450 cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4497 4451 LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4498 4452 reqs = &pkt->sts24.rsp_sense_data[cnt];
4499 4453 } else {
4500 4454 pkt->sts.entry_status = (uint8_t)
4501 4455 (pkt->sts.entry_status & 0x7e);
4502 4456 comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4503 4457 scsi_status_h = pkt->sts.scsi_status_h;
4504 4458 scsi_status_l = pkt->sts.scsi_status_l;
4505 4459 reqs = &pkt->sts.req_sense_data[0];
4506 4460 }
4507 4461 if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4508 4462 EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4509 4463 pkt->sts.entry_status, tq->d_id.b24);
4510 4464 rval = QL_FUNCTION_PARAMETER_ERROR;
4511 4465 }
4512 4466
4513 4467 if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4514 4468 scsi_status_l & STATUS_CHECK) {
4515 4469 EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4516 4470 "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4517 4471 comp_status, scsi_status_h, scsi_status_l);
4518 4472
4519 4473 if (rval == QL_SUCCESS) {
4520 4474 if ((comp_status == CS_TIMEOUT) ||
4521 4475 (comp_status == CS_PORT_UNAVAILABLE) ||
4522 4476 (comp_status == CS_PORT_LOGGED_OUT)) {
4523 4477 rval = QL_FUNCTION_TIMEOUT;
4524 4478 break;
4525 4479 }
4526 4480 rval = QL_FUNCTION_FAILED;
4527 4481 }
4528 4482
4529 4483 if (scsi_status_l & STATUS_CHECK) {
4530 4484 EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4531 4485 "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4532 4486 "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4533 4487 reqs[1], reqs[2], reqs[3], reqs[4],
4534 4488 reqs[5], reqs[6], reqs[7], reqs[8],
4535 4489 reqs[9], reqs[10], reqs[11], reqs[12],
4536 4490 reqs[13], reqs[14], reqs[15], reqs[16],
4537 4491 reqs[17]);
4538 4492 }
4539 4493 } else {
4540 4494 break;
4541 4495 }
4542 4496 }
4543 4497 ql_free_dma_resource(ha, &dma_mem);
4544 4498
4545 4499 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4546 4500
4547 4501 return (rval);
4548 4502 }
4549 4503
/*
 * ql_get_buffer_data
 *	Copies data from user space to kernel buffer.
 *
 * Input:
 *	src:	User source buffer address.
 *	dst:	Kernel destination buffer address.
 *	size:	Amount of data.
 *	mode:	flags.
 *
 * Returns:
 *	Returns number of bytes transferred.
 *
 * Context:
 *	Kernel context.
 */
4566 4520 static uint32_t
4567 4521 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4568 4522 {
4569 4523 uint32_t cnt;
4570 4524
4571 4525 for (cnt = 0; cnt < size; cnt++) {
4572 4526 if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4573 4527 QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4574 4528 break;
4575 4529 }
4576 4530 }
4577 4531
4578 4532 return (cnt);
4579 4533 }
4580 4534
/*
 * ql_send_buffer_data
 *	Copies data from kernel buffer to user space.
 *
 * Input:
 *	src:	Kernel source buffer address.
 *	dst:	User destination buffer address.
 *	size:	Amount of data.
 *	mode:	flags.
 *
 * Returns:
 *	Returns number of bytes transferred.
 *
 * Context:
 *	Kernel context.
 */
4597 4551 static uint32_t
4598 4552 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4599 4553 {
4600 4554 uint32_t cnt;
4601 4555
4602 4556 for (cnt = 0; cnt < size; cnt++) {
4603 4557 if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4604 4558 QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4605 4559 break;
4606 4560 }
4607 4561 }
4608 4562
4609 4563 return (cnt);
4610 4564 }
4611 4565
4612 4566 /*
4613 4567 * ql_find_port
4614 4568 * Locates device queue.
4615 4569 *
4616 4570 * Input:
4617 4571 * ha: adapter state pointer.
4618 4572 * name: device port name.
4619 4573 *
4620 4574 * Returns:
4621 4575 * Returns target queue pointer.
4622 4576 *
4623 4577 * Context:
4624 4578 * Kernel context.
4625 4579 */
4626 4580 static ql_tgt_t *
4627 4581 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4628 4582 {
4629 4583 ql_link_t *link;
4630 4584 ql_tgt_t *tq;
4631 4585 uint16_t index;
4632 4586
4633 4587 /* Scan port list for requested target */
4634 4588 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4635 4589 for (link = ha->dev[index].first; link != NULL;
4636 4590 link = link->next) {
4637 4591 tq = link->base_address;
4638 4592
4639 4593 switch (type) {
4640 4594 case QLNT_LOOP_ID:
4641 4595 if (bcmp(name, &tq->loop_id,
4642 4596 sizeof (uint16_t)) == 0) {
4643 4597 return (tq);
4644 4598 }
4645 4599 break;
4646 4600 case QLNT_PORT:
4647 4601 if (bcmp(name, tq->port_name, 8) == 0) {
4648 4602 return (tq);
4649 4603 }
4650 4604 break;
4651 4605 case QLNT_NODE:
4652 4606 if (bcmp(name, tq->node_name, 8) == 0) {
4653 4607 return (tq);
4654 4608 }
4655 4609 break;
4656 4610 case QLNT_PID:
4657 4611 if (bcmp(name, tq->d_id.r.d_id,
4658 4612 sizeof (tq->d_id.r.d_id)) == 0) {
4659 4613 return (tq);
4660 4614 }
4661 4615 break;
4662 4616 default:
4663 4617 EL(ha, "failed, invalid type=%d\n", type);
4664 4618 return (NULL);
4665 4619 }
4666 4620 }
4667 4621 }
4668 4622
4669 4623 return (NULL);
4670 4624 }
4671 4625
4672 4626 /*
4673 4627 * ql_24xx_flash_desc
4674 4628 * Get flash descriptor table.
4675 4629 *
4676 4630 * Input:
4677 4631 * ha: adapter state pointer.
4678 4632 *
4679 4633 * Returns:
4680 4634 * ql local function return status code.
4681 4635 *
4682 4636 * Context:
4683 4637 * Kernel context.
4684 4638 */
4685 4639 static int
4686 4640 ql_24xx_flash_desc(ql_adapter_state_t *ha)
4687 4641 {
4688 4642 uint32_t cnt;
4689 4643 uint16_t chksum, *bp, data;
4690 4644 int rval;
↓ open down ↓ |
399 lines elided |
↑ open up ↑ |
4691 4645 flash_desc_t *fdesc;
4692 4646 ql_xioctl_t *xp = ha->xioctl;
4693 4647
4694 4648 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4695 4649
4696 4650 if (ha->flash_desc_addr == 0) {
4697 4651 QL_PRINT_9(CE_CONT, "(%d): desc ptr=0\n", ha->instance);
4698 4652 return (QL_FUNCTION_FAILED);
4699 4653 }
4700 4654
4701 - if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
4702 - EL(ha, "kmem_zalloc=null\n");
4703 - return (QL_MEMORY_ALLOC_FAILED);
4704 - }
4655 + fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP);
4705 4656 rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
4706 4657 ha->flash_desc_addr << 2);
4707 4658 if (rval != QL_SUCCESS) {
4708 4659 EL(ha, "read status=%xh\n", rval);
4709 4660 kmem_free(fdesc, sizeof (flash_desc_t));
4710 4661 return (rval);
4711 4662 }
4712 4663
4713 4664 chksum = 0;
4714 4665 bp = (uint16_t *)fdesc;
4715 4666 for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
4716 4667 data = *bp++;
4717 4668 LITTLE_ENDIAN_16(&data);
4718 4669 chksum += data;
4719 4670 }
4720 4671
4721 4672 LITTLE_ENDIAN_32(&fdesc->flash_valid);
4722 4673 LITTLE_ENDIAN_16(&fdesc->flash_version);
4723 4674 LITTLE_ENDIAN_16(&fdesc->flash_len);
4724 4675 LITTLE_ENDIAN_16(&fdesc->flash_checksum);
4725 4676 LITTLE_ENDIAN_16(&fdesc->flash_manuf);
4726 4677 LITTLE_ENDIAN_16(&fdesc->flash_id);
4727 4678 LITTLE_ENDIAN_32(&fdesc->block_size);
4728 4679 LITTLE_ENDIAN_32(&fdesc->alt_block_size);
4729 4680 LITTLE_ENDIAN_32(&fdesc->flash_size);
4730 4681 LITTLE_ENDIAN_32(&fdesc->write_enable_data);
4731 4682 LITTLE_ENDIAN_32(&fdesc->read_timeout);
4732 4683
4733 4684 /* flash size in desc table is in 1024 bytes */
4734 4685 fdesc->flash_size = fdesc->flash_size * 0x400;
4735 4686
4736 4687 if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
4737 4688 fdesc->flash_version != FLASH_DESC_VERSION) {
4738 4689 EL(ha, "invalid descriptor table\n");
4739 4690 kmem_free(fdesc, sizeof (flash_desc_t));
4740 4691 return (QL_FUNCTION_FAILED);
4741 4692 }
4742 4693
4743 4694 bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
4744 4695 kmem_free(fdesc, sizeof (flash_desc_t));
4745 4696
4746 4697 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4747 4698
4748 4699 return (QL_SUCCESS);
4749 4700 }
4750 4701
4751 4702 /*
4752 4703 * ql_setup_flash
4753 4704 * Gets the manufacturer and id number of the flash chip, and
4754 4705 * sets up the size parameter.
4755 4706 *
4756 4707 * Input:
4757 4708 * ha: adapter state pointer.
4758 4709 *
4759 4710 * Returns:
4760 4711 * int: ql local function return status code.
4761 4712 *
4762 4713 * Context:
4763 4714 * Kernel context.
4764 4715 */
static int
ql_setup_flash(ql_adapter_state_t *ha)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = QL_SUCCESS;

	/* Already probed; parameters are cached in the xioctl struct. */
	if (xp->fdesc.flash_size != 0) {
		return (rval);
	}

	/* 2200 boards without a subsystem vendor ID are not supported. */
	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
		return (QL_FUNCTION_FAILED);
	}

	if (CFG_IST(ha, CFG_CTRL_258081)) {
		/*
		 * Temporarily set the ha->xioctl->fdesc.flash_size to
		 * 25xx flash size to avoid failing of ql_dump_fcode.
		 */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ha->xioctl->fdesc.flash_size = 0x800000;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->xioctl->fdesc.flash_size = 0x200000;
		} else {
			ha->xioctl->fdesc.flash_size = 0x400000;
		}

		/* Prefer the on-flash descriptor table when present. */
		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
			EL(ha, "flash desc table ok, exit\n");
			return (rval);
		}
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			/* 8021: fixed Winbond part, no probing. */
			xp->fdesc.flash_manuf = WINBOND_FLASH;
			xp->fdesc.flash_id = WINBOND_FLASHID;
			xp->fdesc.flash_len = 0x17;
		} else {
			(void) ql_24xx_flash_id(ha);
		}

	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		(void) ql_24xx_flash_id(ha);
	} else {
		/* Legacy parts: JEDEC autoselect ID command sequence. */
		ql_flash_enable(ha);

		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x90);
		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);

		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			/* SBUS cards use different unlock addresses. */
			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
			ql_write_flash_byte(ha, 0x5555, 0x55);
			ql_write_flash_byte(ha, 0xaaaa, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0002);
		} else {
			ql_write_flash_byte(ha, 0x5555, 0xaa);
			ql_write_flash_byte(ha, 0x2aaa, 0x55);
			ql_write_flash_byte(ha, 0x5555, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0001);
		}

		/* Return the part to read mode. */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);

		ql_flash_disable(ha);
	}

	/* Default flash descriptor table. */
	xp->fdesc.write_statusreg_cmd = 1;
	xp->fdesc.write_enable_bits = 0;
	xp->fdesc.unprotect_sector_cmd = 0;
	xp->fdesc.protect_sector_cmd = 0;
	xp->fdesc.write_disable_bits = 0x9c;
	xp->fdesc.block_size = 0x10000;
	xp->fdesc.erase_cmd = 0xd8;

	/* Map manufacturer/device ID to flash geometry. */
	switch (xp->fdesc.flash_manuf) {
	case AMD_FLASH:
		switch (xp->fdesc.flash_id) {
		case SPAN_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		case AMD_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case AMD_FLASHID_512K:
		case AMD_FLASHID_512Kt:
		case AMD_FLASHID_512Kb:
			if (CFG_IST(ha, CFG_SBUS_CARD)) {
				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
			} else {
				xp->fdesc.flash_size = 0x80000;
			}
			break;
		case AMD_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ST_FLASH:
		switch (xp->fdesc.flash_id) {
		case ST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case ST_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case ST_FLASHID_M25PXX:
			/* flash_len is log2 of the device size in bytes. */
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case SST_FLASH:
		switch (xp->fdesc.flash_id) {
		case SST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case SST_FLASHID_1024K_A:
			/* This part uses a smaller block and erase cmd. */
			xp->fdesc.flash_size = 0x100000;
			xp->fdesc.block_size = 0x8000;
			xp->fdesc.erase_cmd = 0x52;
			break;
		case SST_FLASHID_1024K:
		case SST_FLASHID_1024K_B:
			xp->fdesc.flash_size = 0x100000;
			break;
		case SST_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case MXIC_FLASH:
		switch (xp->fdesc.flash_id) {
		case MXIC_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case MXIC_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case MXIC_FLASHID_25LXX:
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ATMEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case ATMEL_FLASHID_1024K:
			/* Atmel needs sector protect/unprotect commands. */
			xp->fdesc.flash_size = 0x100000;
			xp->fdesc.write_disable_bits = 0xbc;
			xp->fdesc.unprotect_sector_cmd = 0x39;
			xp->fdesc.protect_sector_cmd = 0x36;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case WINBOND_FLASH:
		switch (xp->fdesc.flash_id) {
		case WINBOND_FLASHID:
			if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x16) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x17) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case INTEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case INTEL_FLASHID:
			if (xp->fdesc.flash_len == 0x11) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x12) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x13) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	default:
		rval = QL_FUNCTION_FAILED;
		break;
	}

	/* Try flash table later. */
	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_24258081)) {
		EL(ha, "no default id\n");
		return (QL_SUCCESS);
	}

	/*
	 * hack for non std 2312 and 6312 boards. hardware people need to
	 * use either the 128k flash chip (original), or something larger.
	 * For driver purposes, we'll treat it as a 128k flash chip.
	 */
	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
	    ha->device_id == 0x2322 || ha->device_id == 0x6322) &&
	    (xp->fdesc.flash_size > 0x20000) &&
	    (CFG_IST(ha, CFG_SBUS_CARD) == 0)) {
		EL(ha, "chip exceeds max size: %xh, using 128k\n",
		    xp->fdesc.flash_size);
		xp->fdesc.flash_size = 0x20000;
	}

	if (rval == QL_SUCCESS) {
		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
		    xp->fdesc.flash_size);
	} else {
		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
	}

	return (rval);
}
5021 4972
5022 4973 /*
5023 4974 * ql_flash_fcode_load
5024 4975 * Loads fcode data into flash from application.
5025 4976 *
5026 4977 * Input:
5027 4978 * ha: adapter state pointer.
5028 4979 * bp: user buffer address.
5029 4980 * size: user buffer size.
5030 4981 * mode: flags
5031 4982 *
5032 4983 * Returns:
5033 4984 *
5034 4985 * Context:
5035 4986 * Kernel context.
5036 4987 */
5037 4988 static int
5038 4989 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5039 4990 int mode)
5040 4991 {
5041 4992 uint8_t *bfp;
5042 4993 ql_xioctl_t *xp = ha->xioctl;
5043 4994 int rval = 0;
5044 4995
5045 4996 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5046 4997
5047 4998 if (bsize > xp->fdesc.flash_size) {
5048 4999 EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
5049 5000 xp->fdesc.flash_size);
5050 5001 return (ENOMEM);
5051 5002 }
5052 5003
5053 5004 if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5054 5005 EL(ha, "failed, kmem_zalloc\n");
5055 5006 rval = ENOMEM;
5056 5007 } else {
5057 5008 if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
5058 5009 EL(ha, "failed, ddi_copyin\n");
5059 5010 rval = EFAULT;
5060 5011 } else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
5061 5012 EL(ha, "failed, load_fcode\n");
5062 5013 rval = EFAULT;
5063 5014 } else {
5064 5015 /* Reset caches on all adapter instances. */
5065 5016 ql_update_flash_caches(ha);
5066 5017 rval = 0;
5067 5018 }
5068 5019 kmem_free(bfp, bsize);
5069 5020 }
5070 5021
5071 5022 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5072 5023
5073 5024 return (rval);
5074 5025 }
5075 5026
5076 5027 /*
5077 5028 * ql_load_fcode
5078 5029 * Loads fcode in to flash.
5079 5030 *
5080 5031 * Input:
5081 5032 * ha: adapter state pointer.
5082 5033 * dp: data pointer.
5083 5034 * size: data length.
5084 5035 * addr: flash byte address.
5085 5036 *
5086 5037 * Returns:
5087 5038 * ql local function return status code.
5088 5039 *
5089 5040 * Context:
5090 5041 * Kernel context.
5091 5042 */
int
ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
{
	uint32_t	cnt;
	int		rval;

	/* 24xx/25xx/81xx parts use a different flash write path. */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		return (ql_24xx_load_flash(ha, dp, size, addr));
	}

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * sbus has an additional check to make
		 * sure they don't brick the HBA.
		 */
		if (dp[0] != 0xf1) {
			EL(ha, "failed, incorrect fcode for sbus\n");
			return (QL_FUNCTION_PARAMETER_ERROR);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, 0);

	if (rval == QL_SUCCESS) {
		/* Write fcode data to flash, one byte per program cycle. */
		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				drv_usecwait(1);
			}
			rval = ql_program_flash_address(ha, addr++, *dp++);
			if (rval != QL_SUCCESS)
				break;
		}
	}

	/* Always restore the part to read mode before unlocking. */
	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5148 5099
5149 5100 /*
5150 5101 * ql_flash_fcode_dump
5151 5102 * Dumps FLASH to application.
5152 5103 *
5153 5104 * Input:
5154 5105 * ha: adapter state pointer.
5155 5106 * bp: user buffer address.
5156 5107 * bsize: user buffer size
5157 5108 * faddr: flash byte address
5158 5109 * mode: flags
5159 5110 *
5160 5111 * Returns:
5161 5112 *
5162 5113 * Context:
5163 5114 * Kernel context.
5164 5115 */
5165 5116 static int
5166 5117 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5167 5118 uint32_t faddr, int mode)
5168 5119 {
5169 5120 uint8_t *bfp;
5170 5121 int rval;
5171 5122 ql_xioctl_t *xp = ha->xioctl;
5172 5123
5173 5124 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5174 5125
5175 5126 /* adjust max read size to flash size */
5176 5127 if (bsize > xp->fdesc.flash_size) {
5177 5128 EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5178 5129 xp->fdesc.flash_size);
5179 5130 bsize = xp->fdesc.flash_size;
5180 5131 }
5181 5132
5182 5133 if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5183 5134 EL(ha, "failed, kmem_zalloc\n");
5184 5135 rval = ENOMEM;
5185 5136 } else {
5186 5137 /* Dump Flash fcode. */
5187 5138 rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5188 5139
5189 5140 if (rval != QL_SUCCESS) {
5190 5141 EL(ha, "failed, dump_fcode = %x\n", rval);
5191 5142 rval = EFAULT;
5192 5143 } else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5193 5144 EL(ha, "failed, ddi_copyout\n");
5194 5145 rval = EFAULT;
5195 5146 } else {
5196 5147 rval = 0;
5197 5148 }
5198 5149 kmem_free(bfp, bsize);
5199 5150 }
5200 5151
5201 5152 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5202 5153
5203 5154 return (rval);
5204 5155 }
5205 5156
5206 5157 /*
5207 5158 * ql_dump_fcode
5208 5159 * Dumps fcode from flash.
5209 5160 *
5210 5161 * Input:
5211 5162 * ha: adapter state pointer.
5212 5163 * dp: data pointer.
5213 5164 * size: data length in bytes.
5214 5165 * startpos: starting position in flash (byte address).
5215 5166 *
5216 5167 * Returns:
5217 5168 * ql local function return status code.
5218 5169 *
5219 5170 * Context:
5220 5171 * Kernel context.
5221 5172 *
5222 5173 */
int
ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
    uint32_t startpos)
{
	uint32_t	cnt, data, addr;
	uint8_t	bp[4], *src;
	int	fp_rval, rval = QL_SUCCESS;
	dma_mem_t	mem;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* make sure startpos+size doesn't exceed flash */
	if (size + startpos > ha->xioctl->fdesc.flash_size) {
		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
		    size, startpos, ha->xioctl->fdesc.flash_size);
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		/* check start addr is 32 bit aligned for 24xx */
		if ((startpos & 0x3) != 0) {
			/*
			 * Read the 32-bit word containing startpos and
			 * emit just the leading unaligned bytes.
			 */
			rval = ql_24xx_read_flash(ha,
			    ha->flash_data_addr | startpos >> 2, &data);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed2, rval = %xh\n", rval);
				return (rval);
			}
			bp[0] = LSB(LSW(data));
			bp[1] = MSB(LSW(data));
			bp[2] = LSB(MSW(data));
			bp[3] = MSB(MSW(data));
			while (size && startpos & 0x3) {
				*dp++ = bp[startpos & 0x3];
				startpos++;
				size--;
			}
			if (size == 0) {
				QL_PRINT_9(CE_CONT, "(%d): done2\n",
				    ha->instance);
				return (rval);
			}
		}

		/* adjust 24xx start addr for 32 bit words */
		addr = startpos / 4 | ha->flash_data_addr;
	}

	bzero(&mem, sizeof (dma_mem_t));
	/* Check for Fast page is supported */
	if ((ha->pha->task_daemon_flags & FIRMWARE_UP) &&
	    (CFG_IST(ha, CFG_CTRL_2581))) {
		fp_rval = QL_SUCCESS;
		/* Setup DMA buffer. */
		rval = ql_get_dma_mem(ha, &mem, size,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, ql_get_dma_mem=%xh\n",
			    rval);
			/*
			 * NOTE(review): returns errno ENOMEM from a
			 * function that otherwise returns QL_* status
			 * codes; callers here only test != QL_SUCCESS,
			 * but confirm this mixing is intentional.
			 */
			return (ENOMEM);
		}
	} else {
		fp_rval = QL_NOT_SUPPORTED;
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
		ql_flash_enable(ha);
	}

	/* Read fcode data from flash. */
	while (size) {
		/* Allow other system activity. */
		if (size % 0x1000 == 0) {
			ql_delay(ha, 100000);
		}
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* Fast path: bulk read through RISC RAM + DMA. */
			if (fp_rval == QL_SUCCESS && (addr & 0x3f) == 0) {
				cnt = (size + 3) >> 2;
				fp_rval = ql_rd_risc_ram(ha, addr,
				    mem.cookie.dmac_laddress, cnt);
				if (fp_rval == QL_SUCCESS) {
					for (src = mem.bp; size; size--) {
						*dp++ = *src++;
					}
					addr += cnt;
					continue;
				}
				/* On failure, fall back to word reads. */
			}
			rval = ql_24xx_read_flash(ha, addr++,
			    &data);
			if (rval != QL_SUCCESS) {
				break;
			}
			/* Unpack the 32-bit word least-significant first. */
			bp[0] = LSB(LSW(data));
			bp[1] = MSB(LSW(data));
			bp[2] = LSB(MSW(data));
			bp[3] = MSB(MSW(data));
			for (cnt = 0; size && cnt < 4; size--) {
				*dp++ = bp[cnt++];
			}
		} else {
			/* Legacy parts: one byte at a time. */
			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
			size--;
		}
	}

	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
		ql_flash_disable(ha);
	}

	GLOBAL_HW_UNLOCK();

	/* Release the fast-page DMA buffer if one was set up. */
	if (mem.dma_handle != NULL) {
		ql_free_dma_resource(ha, &mem);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5349 5300
5350 5301 /*
5351 5302 * ql_program_flash_address
5352 5303 * Program flash address.
5353 5304 *
5354 5305 * Input:
5355 5306 * ha: adapter state pointer.
5356 5307 * addr: flash byte address.
5357 5308 * data: data to be written to flash.
5358 5309 *
5359 5310 * Returns:
5360 5311 * ql local function return status code.
5361 5312 *
5362 5313 * Context:
5363 5314 * Kernel context.
5364 5315 */
5365 5316 static int
5366 5317 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5367 5318 uint8_t data)
5368 5319 {
5369 5320 int rval;
5370 5321
5371 5322 /* Write Program Command Sequence */
5372 5323 if (CFG_IST(ha, CFG_SBUS_CARD)) {
5373 5324 ql_write_flash_byte(ha, 0x5555, 0xa0);
5374 5325 ql_write_flash_byte(ha, addr, data);
5375 5326 } else {
5376 5327 ql_write_flash_byte(ha, 0x5555, 0xaa);
5377 5328 ql_write_flash_byte(ha, 0x2aaa, 0x55);
5378 5329 ql_write_flash_byte(ha, 0x5555, 0xa0);
5379 5330 ql_write_flash_byte(ha, addr, data);
5380 5331 }
5381 5332
5382 5333 /* Wait for write to complete. */
5383 5334 rval = ql_poll_flash(ha, addr, data);
5384 5335
5385 5336 if (rval != QL_SUCCESS) {
5386 5337 EL(ha, "failed, rval=%xh\n", rval);
5387 5338 }
5388 5339 return (rval);
5389 5340 }
5390 5341
5391 5342 /*
5392 5343 * ql_set_rnid_parameters
5393 5344 * Set RNID parameters.
5394 5345 *
5395 5346 * Input:
5396 5347 * ha: adapter state pointer.
5397 5348 * cmd: User space CT arguments pointer.
5398 5349 * mode: flags.
5399 5350 */
5400 5351 static void
5401 5352 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5402 5353 {
5403 5354 EXT_SET_RNID_REQ tmp_set;
5404 5355 EXT_RNID_DATA *tmp_buf;
5405 5356 int rval = 0;
5406 5357
5407 5358 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5408 5359
5409 5360 if (DRIVER_SUSPENDED(ha)) {
5410 5361 EL(ha, "failed, LOOP_NOT_READY\n");
5411 5362 cmd->Status = EXT_STATUS_BUSY;
5412 5363 cmd->ResponseLen = 0;
5413 5364 return;
5414 5365 }
5415 5366
5416 5367 cmd->ResponseLen = 0; /* NO response to caller. */
5417 5368 if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5418 5369 /* parameter error */
5419 5370 EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5420 5371 cmd->RequestLen);
5421 5372 cmd->Status = EXT_STATUS_INVALID_PARAM;
5422 5373 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5423 5374 cmd->ResponseLen = 0;
5424 5375 return;
5425 5376 }
5426 5377
5427 5378 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
↓ open down ↓ |
713 lines elided |
↑ open up ↑ |
5428 5379 cmd->RequestLen, mode);
5429 5380 if (rval != 0) {
5430 5381 EL(ha, "failed, ddi_copyin\n");
5431 5382 cmd->Status = EXT_STATUS_COPY_ERR;
5432 5383 cmd->ResponseLen = 0;
5433 5384 return;
5434 5385 }
5435 5386
5436 5387 /* Allocate memory for command. */
5437 5388 tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5438 - if (tmp_buf == NULL) {
5439 - EL(ha, "failed, kmem_zalloc\n");
5440 - cmd->Status = EXT_STATUS_NO_MEMORY;
5441 - cmd->ResponseLen = 0;
5442 - return;
5443 - }
5444 5389
5445 5390 rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5446 5391 (caddr_t)tmp_buf);
5447 5392 if (rval != QL_SUCCESS) {
5448 5393 /* error */
5449 5394 EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5450 5395 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5451 5396 cmd->Status = EXT_STATUS_ERR;
5452 5397 cmd->ResponseLen = 0;
5453 5398 return;
5454 5399 }
5455 5400
5456 5401 /* Now set the requested params. */
5457 5402 bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5458 5403 bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5459 5404 bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5460 5405
5461 5406 rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5462 5407 (caddr_t)tmp_buf);
5463 5408 if (rval != QL_SUCCESS) {
5464 5409 /* error */
5465 5410 EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5466 5411 cmd->Status = EXT_STATUS_ERR;
5467 5412 cmd->ResponseLen = 0;
5468 5413 }
5469 5414
5470 5415 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5471 5416
5472 5417 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5473 5418 }
5474 5419
5475 5420 /*
5476 5421 * ql_get_rnid_parameters
5477 5422 * Get RNID parameters.
5478 5423 *
5479 5424 * Input:
5480 5425 * ha: adapter state pointer.
5481 5426 * cmd: User space CT arguments pointer.
5482 5427 * mode: flags.
5483 5428 */
5484 5429 static void
5485 5430 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5486 5431 {
5487 5432 EXT_RNID_DATA *tmp_buf;
5488 5433 uint32_t rval;
5489 5434
5490 5435 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
↓ open down ↓ |
37 lines elided |
↑ open up ↑ |
5491 5436
5492 5437 if (DRIVER_SUSPENDED(ha)) {
5493 5438 EL(ha, "failed, LOOP_NOT_READY\n");
5494 5439 cmd->Status = EXT_STATUS_BUSY;
5495 5440 cmd->ResponseLen = 0;
5496 5441 return;
5497 5442 }
5498 5443
5499 5444 /* Allocate memory for command. */
5500 5445 tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5501 - if (tmp_buf == NULL) {
5502 - EL(ha, "failed, kmem_zalloc\n");
5503 - cmd->Status = EXT_STATUS_NO_MEMORY;
5504 - cmd->ResponseLen = 0;
5505 - return;
5506 - }
5507 5446
5508 5447 /* Send command */
5509 5448 rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5510 5449 (caddr_t)tmp_buf);
5511 5450 if (rval != QL_SUCCESS) {
5512 5451 /* error */
5513 5452 EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5514 5453 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5515 5454 cmd->Status = EXT_STATUS_ERR;
5516 5455 cmd->ResponseLen = 0;
5517 5456 return;
5518 5457 }
5519 5458
5520 5459 /* Copy the response */
5521 5460 if (ql_send_buffer_data((caddr_t)tmp_buf,
5522 5461 (caddr_t)(uintptr_t)cmd->ResponseAdr,
5523 5462 sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5524 5463 EL(ha, "failed, ddi_copyout\n");
5525 5464 cmd->Status = EXT_STATUS_COPY_ERR;
5526 5465 cmd->ResponseLen = 0;
5527 5466 } else {
5528 5467 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5529 5468 cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5530 5469 }
5531 5470
5532 5471 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5533 5472 }
5534 5473
5535 5474 /*
5536 5475 * ql_reset_statistics
5537 5476 * Performs EXT_SC_RST_STATISTICS subcommand. of EXT_CC_SET_DATA.
5538 5477 *
5539 5478 * Input:
5540 5479 * ha: adapter state pointer.
5541 5480 * cmd: Local EXT_IOCTL cmd struct pointer.
5542 5481 *
5543 5482 * Returns:
5544 5483 * None, request status indicated in cmd->Status.
5545 5484 *
5546 5485 * Context:
5547 5486 * Kernel context.
5548 5487 */
5549 5488 static int
5550 5489 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5551 5490 {
5552 5491 ql_xioctl_t *xp = ha->xioctl;
5553 5492 int rval = 0;
5554 5493
5555 5494 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5556 5495
5557 5496 if (DRIVER_SUSPENDED(ha)) {
5558 5497 EL(ha, "failed, LOOP_NOT_READY\n");
5559 5498 cmd->Status = EXT_STATUS_BUSY;
5560 5499 cmd->ResponseLen = 0;
5561 5500 return (QL_FUNCTION_SUSPENDED);
5562 5501 }
5563 5502
5564 5503 rval = ql_reset_link_status(ha);
5565 5504 if (rval != QL_SUCCESS) {
5566 5505 EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5567 5506 cmd->Status = EXT_STATUS_MAILBOX;
5568 5507 cmd->DetailStatus = rval;
5569 5508 cmd->ResponseLen = 0;
5570 5509 }
5571 5510
5572 5511 TASK_DAEMON_LOCK(ha);
5573 5512 xp->IosRequested = 0;
5574 5513 xp->BytesRequested = 0;
5575 5514 xp->IOInputRequests = 0;
5576 5515 xp->IOOutputRequests = 0;
5577 5516 xp->IOControlRequests = 0;
5578 5517 xp->IOInputMByteCnt = 0;
5579 5518 xp->IOOutputMByteCnt = 0;
5580 5519 xp->IOOutputByteCnt = 0;
5581 5520 xp->IOInputByteCnt = 0;
5582 5521 TASK_DAEMON_UNLOCK(ha);
5583 5522
5584 5523 INTR_LOCK(ha);
5585 5524 xp->ControllerErrorCount = 0;
5586 5525 xp->DeviceErrorCount = 0;
5587 5526 xp->TotalLipResets = 0;
5588 5527 xp->TotalInterrupts = 0;
5589 5528 INTR_UNLOCK(ha);
5590 5529
5591 5530 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5592 5531
5593 5532 return (rval);
5594 5533 }
5595 5534
5596 5535 /*
5597 5536 * ql_get_statistics
5598 5537 * Performs EXT_SC_GET_STATISTICS subcommand. of EXT_CC_GET_DATA.
5599 5538 *
5600 5539 * Input:
5601 5540 * ha: adapter state pointer.
5602 5541 * cmd: Local EXT_IOCTL cmd struct pointer.
5603 5542 * mode: flags.
5604 5543 *
5605 5544 * Returns:
5606 5545 * None, request status indicated in cmd->Status.
5607 5546 *
5608 5547 * Context:
5609 5548 * Kernel context.
5610 5549 */
5611 5550 static void
5612 5551 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5613 5552 {
5614 5553 EXT_HBA_PORT_STAT ps = {0};
5615 5554 ql_link_stats_t *ls;
5616 5555 int rval;
5617 5556 ql_xioctl_t *xp = ha->xioctl;
5618 5557 int retry = 10;
5619 5558
5620 5559 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5621 5560
5622 5561 while (ha->task_daemon_flags &
5623 5562 (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5624 5563 ql_delay(ha, 10000000); /* 10 second delay */
5625 5564
5626 5565 retry--;
5627 5566
↓ open down ↓ |
111 lines elided |
↑ open up ↑ |
5628 5567 if (retry == 0) { /* effectively 100 seconds */
5629 5568 EL(ha, "failed, LOOP_NOT_READY\n");
5630 5569 cmd->Status = EXT_STATUS_BUSY;
5631 5570 cmd->ResponseLen = 0;
5632 5571 return;
5633 5572 }
5634 5573 }
5635 5574
5636 5575 /* Allocate memory for command. */
5637 5576 ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5638 - if (ls == NULL) {
5639 - EL(ha, "failed, kmem_zalloc\n");
5640 - cmd->Status = EXT_STATUS_NO_MEMORY;
5641 - cmd->ResponseLen = 0;
5642 - return;
5643 - }
5644 5577
5645 5578 /*
5646 5579 * I think these are supposed to be port statistics
5647 5580 * the loop ID or port ID should be in cmd->Instance.
5648 5581 */
5649 5582 rval = ql_get_status_counts(ha, (uint16_t)
5650 5583 (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5651 5584 sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5652 5585 if (rval != QL_SUCCESS) {
5653 5586 EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5654 5587 ha->loop_id);
5655 5588 cmd->Status = EXT_STATUS_MAILBOX;
5656 5589 cmd->DetailStatus = rval;
5657 5590 cmd->ResponseLen = 0;
5658 5591 } else {
5659 5592 ps.ControllerErrorCount = xp->ControllerErrorCount;
5660 5593 ps.DeviceErrorCount = xp->DeviceErrorCount;
5661 5594 ps.IoCount = (uint32_t)(xp->IOInputRequests +
5662 5595 xp->IOOutputRequests + xp->IOControlRequests);
5663 5596 ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5664 5597 xp->IOOutputMByteCnt);
5665 5598 ps.LipResetCount = xp->TotalLipResets;
5666 5599 ps.InterruptCount = xp->TotalInterrupts;
5667 5600 ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5668 5601 ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5669 5602 ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5670 5603 ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5671 5604 ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5672 5605 ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5673 5606
5674 5607 rval = ddi_copyout((void *)&ps,
5675 5608 (void *)(uintptr_t)cmd->ResponseAdr,
5676 5609 sizeof (EXT_HBA_PORT_STAT), mode);
5677 5610 if (rval != 0) {
5678 5611 EL(ha, "failed, ddi_copyout\n");
5679 5612 cmd->Status = EXT_STATUS_COPY_ERR;
5680 5613 cmd->ResponseLen = 0;
5681 5614 } else {
5682 5615 cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5683 5616 }
5684 5617 }
5685 5618
5686 5619 kmem_free(ls, sizeof (ql_link_stats_t));
5687 5620
5688 5621 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5689 5622 }
5690 5623
5691 5624 /*
5692 5625 * ql_get_statistics_fc
5693 5626 * Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5694 5627 *
5695 5628 * Input:
5696 5629 * ha: adapter state pointer.
5697 5630 * cmd: Local EXT_IOCTL cmd struct pointer.
5698 5631 * mode: flags.
5699 5632 *
5700 5633 * Returns:
5701 5634 * None, request status indicated in cmd->Status.
5702 5635 *
5703 5636 * Context:
5704 5637 * Kernel context.
5705 5638 */
5706 5639 static void
5707 5640 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5708 5641 {
5709 5642 EXT_HBA_PORT_STAT ps = {0};
5710 5643 ql_link_stats_t *ls;
5711 5644 int rval;
5712 5645 uint16_t qlnt;
5713 5646 EXT_DEST_ADDR pextdestaddr;
5714 5647 uint8_t *name;
5715 5648 ql_tgt_t *tq = NULL;
5716 5649 int retry = 10;
5717 5650
5718 5651 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5719 5652
5720 5653 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5721 5654 (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5722 5655 EL(ha, "failed, ddi_copyin\n");
5723 5656 cmd->Status = EXT_STATUS_COPY_ERR;
5724 5657 cmd->ResponseLen = 0;
5725 5658 return;
5726 5659 }
5727 5660
5728 5661 qlnt = QLNT_PORT;
5729 5662 name = pextdestaddr.DestAddr.WWPN;
5730 5663
5731 5664 QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5732 5665 ha->instance, name[0], name[1], name[2], name[3], name[4],
5733 5666 name[5], name[6], name[7]);
5734 5667
5735 5668 tq = ql_find_port(ha, name, qlnt);
5736 5669
5737 5670 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5738 5671 EL(ha, "failed, fc_port not found\n");
5739 5672 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5740 5673 cmd->ResponseLen = 0;
5741 5674 return;
5742 5675 }
5743 5676
5744 5677 while (ha->task_daemon_flags &
5745 5678 (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5746 5679 ql_delay(ha, 10000000); /* 10 second delay */
5747 5680
5748 5681 retry--;
5749 5682
↓ open down ↓ |
96 lines elided |
↑ open up ↑ |
5750 5683 if (retry == 0) { /* effectively 100 seconds */
5751 5684 EL(ha, "failed, LOOP_NOT_READY\n");
5752 5685 cmd->Status = EXT_STATUS_BUSY;
5753 5686 cmd->ResponseLen = 0;
5754 5687 return;
5755 5688 }
5756 5689 }
5757 5690
5758 5691 /* Allocate memory for command. */
5759 5692 ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5760 - if (ls == NULL) {
5761 - EL(ha, "failed, kmem_zalloc\n");
5762 - cmd->Status = EXT_STATUS_NO_MEMORY;
5763 - cmd->ResponseLen = 0;
5764 - return;
5765 - }
5766 5693
5767 5694 rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5768 5695 (caddr_t)ls, 0);
5769 5696 if (rval != QL_SUCCESS) {
5770 5697 EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5771 5698 tq->d_id.b24);
5772 5699 cmd->Status = EXT_STATUS_MAILBOX;
5773 5700 cmd->DetailStatus = rval;
5774 5701 cmd->ResponseLen = 0;
5775 5702 } else {
5776 5703 ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5777 5704 ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5778 5705 ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5779 5706 ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5780 5707 ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5781 5708 ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5782 5709
5783 5710 rval = ddi_copyout((void *)&ps,
5784 5711 (void *)(uintptr_t)cmd->ResponseAdr,
5785 5712 sizeof (EXT_HBA_PORT_STAT), mode);
5786 5713
5787 5714 if (rval != 0) {
5788 5715 EL(ha, "failed, ddi_copyout\n");
5789 5716 cmd->Status = EXT_STATUS_COPY_ERR;
5790 5717 cmd->ResponseLen = 0;
5791 5718 } else {
5792 5719 cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5793 5720 }
5794 5721 }
5795 5722
5796 5723 kmem_free(ls, sizeof (ql_link_stats_t));
5797 5724
5798 5725 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5799 5726 }
5800 5727
5801 5728 /*
5802 5729 * ql_get_statistics_fc4
5803 5730 * Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5804 5731 *
5805 5732 * Input:
5806 5733 * ha: adapter state pointer.
5807 5734 * cmd: Local EXT_IOCTL cmd struct pointer.
5808 5735 * mode: flags.
5809 5736 *
5810 5737 * Returns:
5811 5738 * None, request status indicated in cmd->Status.
5812 5739 *
5813 5740 * Context:
5814 5741 * Kernel context.
5815 5742 */
5816 5743 static void
5817 5744 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5818 5745 {
5819 5746 uint32_t rval;
5820 5747 EXT_HBA_FC4STATISTICS fc4stats = {0};
5821 5748 ql_xioctl_t *xp = ha->xioctl;
5822 5749
5823 5750 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5824 5751
5825 5752 fc4stats.InputRequests = xp->IOInputRequests;
5826 5753 fc4stats.OutputRequests = xp->IOOutputRequests;
5827 5754 fc4stats.ControlRequests = xp->IOControlRequests;
5828 5755 fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5829 5756 fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5830 5757
5831 5758 rval = ddi_copyout((void *)&fc4stats,
5832 5759 (void *)(uintptr_t)cmd->ResponseAdr,
5833 5760 sizeof (EXT_HBA_FC4STATISTICS), mode);
5834 5761
5835 5762 if (rval != 0) {
5836 5763 EL(ha, "failed, ddi_copyout\n");
5837 5764 cmd->Status = EXT_STATUS_COPY_ERR;
5838 5765 cmd->ResponseLen = 0;
5839 5766 } else {
5840 5767 cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5841 5768 }
5842 5769
5843 5770 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5844 5771 }
5845 5772
5846 5773 /*
5847 5774 * ql_set_led_state
5848 5775 * Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5849 5776 *
5850 5777 * Input:
5851 5778 * ha: adapter state pointer.
5852 5779 * cmd: Local EXT_IOCTL cmd struct pointer.
5853 5780 * mode: flags.
5854 5781 *
5855 5782 * Returns:
5856 5783 * None, request status indicated in cmd->Status.
5857 5784 *
5858 5785 * Context:
5859 5786 * Kernel context.
5860 5787 */
5861 5788 static void
5862 5789 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5863 5790 {
5864 5791 EXT_BEACON_CONTROL bstate;
5865 5792 uint32_t rval;
5866 5793 ql_xioctl_t *xp = ha->xioctl;
5867 5794
5868 5795 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5869 5796
5870 5797 if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5871 5798 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5872 5799 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5873 5800 EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
5874 5801 " Len=%xh\n", cmd->RequestLen);
5875 5802 cmd->ResponseLen = 0;
5876 5803 return;
5877 5804 }
5878 5805
5879 5806 if (ha->device_id < 0x2300) {
5880 5807 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5881 5808 cmd->DetailStatus = 0;
5882 5809 EL(ha, "done - failed, Invalid function for HBA model\n");
5883 5810 cmd->ResponseLen = 0;
5884 5811 return;
5885 5812 }
5886 5813
5887 5814 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
5888 5815 cmd->RequestLen, mode);
5889 5816
5890 5817 if (rval != 0) {
5891 5818 cmd->Status = EXT_STATUS_COPY_ERR;
5892 5819 EL(ha, "done - failed, ddi_copyin\n");
5893 5820 return;
5894 5821 }
5895 5822
5896 5823 switch (bstate.State) {
5897 5824 case EXT_DEF_GRN_BLINK_OFF: /* turn beacon off */
5898 5825 if (xp->ledstate.BeaconState == BEACON_OFF) {
5899 5826 /* not quite an error -- LED state is already off */
5900 5827 cmd->Status = EXT_STATUS_OK;
5901 5828 EL(ha, "LED off request -- LED is already off\n");
5902 5829 break;
5903 5830 }
5904 5831
5905 5832 xp->ledstate.BeaconState = BEACON_OFF;
5906 5833 xp->ledstate.LEDflags = LED_ALL_OFF;
5907 5834
5908 5835 if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5909 5836 cmd->Status = EXT_STATUS_MAILBOX;
5910 5837 } else {
5911 5838 cmd->Status = EXT_STATUS_OK;
5912 5839 }
5913 5840 break;
5914 5841
5915 5842 case EXT_DEF_GRN_BLINK_ON: /* turn beacon on */
5916 5843 if (xp->ledstate.BeaconState == BEACON_ON) {
5917 5844 /* not quite an error -- LED state is already on */
5918 5845 cmd->Status = EXT_STATUS_OK;
5919 5846 EL(ha, "LED on request - LED is already on\n");
5920 5847 break;
5921 5848 }
5922 5849
5923 5850 if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5924 5851 cmd->Status = EXT_STATUS_MAILBOX;
5925 5852 break;
5926 5853 }
5927 5854
5928 5855 if (CFG_IST(ha, CFG_CTRL_24258081)) {
5929 5856 xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5930 5857 } else {
5931 5858 xp->ledstate.LEDflags = LED_GREEN;
5932 5859 }
5933 5860 xp->ledstate.BeaconState = BEACON_ON;
5934 5861
5935 5862 cmd->Status = EXT_STATUS_OK;
5936 5863 break;
5937 5864 default:
5938 5865 cmd->Status = EXT_STATUS_ERR;
5939 5866 EL(ha, "failed, unknown state request %xh\n", bstate.State);
5940 5867 break;
5941 5868 }
5942 5869
5943 5870 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5944 5871 }
5945 5872
5946 5873 /*
5947 5874 * ql_get_led_state
5948 5875 * Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5949 5876 *
5950 5877 * Input:
5951 5878 * ha: adapter state pointer.
5952 5879 * cmd: Local EXT_IOCTL cmd struct pointer.
5953 5880 * mode: flags.
5954 5881 *
5955 5882 * Returns:
5956 5883 * None, request status indicated in cmd->Status.
5957 5884 *
5958 5885 * Context:
5959 5886 * Kernel context.
5960 5887 */
5961 5888 static void
5962 5889 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5963 5890 {
5964 5891 EXT_BEACON_CONTROL bstate = {0};
5965 5892 uint32_t rval;
5966 5893 ql_xioctl_t *xp = ha->xioctl;
5967 5894
5968 5895 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5969 5896
5970 5897 if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5971 5898 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5972 5899 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5973 5900 EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
5974 5901 "Len=%xh\n", cmd->ResponseLen);
5975 5902 cmd->ResponseLen = 0;
5976 5903 return;
5977 5904 }
5978 5905
5979 5906 if (ha->device_id < 0x2300) {
5980 5907 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5981 5908 cmd->DetailStatus = 0;
5982 5909 EL(ha, "done - failed, Invalid function for HBA model\n");
5983 5910 cmd->ResponseLen = 0;
5984 5911 return;
5985 5912 }
5986 5913
5987 5914 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5988 5915 cmd->Status = EXT_STATUS_BUSY;
5989 5916 EL(ha, "done - failed, isp abort active\n");
5990 5917 cmd->ResponseLen = 0;
5991 5918 return;
5992 5919 }
5993 5920
5994 5921 /* inform the user of the current beacon state (off or on) */
5995 5922 bstate.State = xp->ledstate.BeaconState;
5996 5923
5997 5924 rval = ddi_copyout((void *)&bstate,
5998 5925 (void *)(uintptr_t)cmd->ResponseAdr,
5999 5926 sizeof (EXT_BEACON_CONTROL), mode);
6000 5927
6001 5928 if (rval != 0) {
6002 5929 EL(ha, "failed, ddi_copyout\n");
6003 5930 cmd->Status = EXT_STATUS_COPY_ERR;
6004 5931 cmd->ResponseLen = 0;
6005 5932 } else {
6006 5933 cmd->Status = EXT_STATUS_OK;
6007 5934 cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
6008 5935 }
6009 5936
6010 5937 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6011 5938 }
6012 5939
6013 5940 /*
6014 5941 * ql_blink_led
6015 5942 * Determine the next state of the LED and drive it
6016 5943 *
6017 5944 * Input:
6018 5945 * ha: adapter state pointer.
6019 5946 *
6020 5947 * Context:
6021 5948 * Interrupt context.
6022 5949 */
6023 5950 void
6024 5951 ql_blink_led(ql_adapter_state_t *ha)
6025 5952 {
6026 5953 uint32_t nextstate;
6027 5954 ql_xioctl_t *xp = ha->xioctl;
6028 5955
6029 5956 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6030 5957
6031 5958 if (xp->ledstate.BeaconState == BEACON_ON) {
6032 5959 /* determine the next led state */
6033 5960 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6034 5961 nextstate = (xp->ledstate.LEDflags) &
6035 5962 (~(RD32_IO_REG(ha, gpiod)));
6036 5963 } else {
6037 5964 nextstate = (xp->ledstate.LEDflags) &
6038 5965 (~(RD16_IO_REG(ha, gpiod)));
6039 5966 }
6040 5967
6041 5968 /* turn the led on or off */
6042 5969 ql_drive_led(ha, nextstate);
6043 5970 }
6044 5971
6045 5972 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6046 5973 }
6047 5974
6048 5975 /*
6049 5976 * ql_drive_led
6050 5977 * drive the led's as determined by LEDflags
6051 5978 *
6052 5979 * Input:
6053 5980 * ha: adapter state pointer.
6054 5981 * LEDflags: LED flags
6055 5982 *
6056 5983 * Context:
6057 5984 * Kernel/Interrupt context.
6058 5985 */
6059 5986 static void
6060 5987 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
6061 5988 {
6062 5989
6063 5990 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6064 5991
6065 5992 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
6066 5993
6067 5994 uint16_t gpio_enable, gpio_data;
6068 5995
6069 5996 /* setup to send new data */
6070 5997 gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
6071 5998 gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
6072 5999 WRT16_IO_REG(ha, gpioe, gpio_enable);
6073 6000
6074 6001 /* read current data and clear out old led data */
6075 6002 gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
6076 6003 gpio_data = (uint16_t)(gpio_data & ~LED_MASK);
6077 6004
6078 6005 /* set in the new led data. */
6079 6006 gpio_data = (uint16_t)(gpio_data | LEDflags);
6080 6007
6081 6008 /* write out the new led data */
6082 6009 WRT16_IO_REG(ha, gpiod, gpio_data);
6083 6010
6084 6011 } else if (CFG_IST(ha, CFG_CTRL_24258081)) {
6085 6012
6086 6013 uint32_t gpio_data;
6087 6014
6088 6015 /* setup to send new data */
6089 6016 gpio_data = RD32_IO_REG(ha, gpiod);
6090 6017 gpio_data |= LED_MASK_UPDATE_24;
6091 6018 WRT32_IO_REG(ha, gpiod, gpio_data);
6092 6019
6093 6020 /* read current data and clear out old led data */
6094 6021 gpio_data = RD32_IO_REG(ha, gpiod);
6095 6022 gpio_data &= ~LED_MASK_COLORS_24;
6096 6023
6097 6024 /* set in the new led data */
6098 6025 gpio_data |= LEDflags;
6099 6026
6100 6027 /* write out the new led data */
6101 6028 WRT32_IO_REG(ha, gpiod, gpio_data);
6102 6029
6103 6030 } else {
6104 6031 EL(ha, "unsupported HBA: %xh", ha->device_id);
6105 6032 }
6106 6033
6107 6034 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6108 6035 }
6109 6036
6110 6037 /*
6111 6038 * ql_setup_led
6112 6039 * Setup LED for driver control
6113 6040 *
6114 6041 * Input:
6115 6042 * ha: adapter state pointer.
6116 6043 *
6117 6044 * Context:
6118 6045 * Kernel/Interrupt context.
6119 6046 */
6120 6047 static uint32_t
6121 6048 ql_setup_led(ql_adapter_state_t *ha)
6122 6049 {
6123 6050 uint32_t rval;
6124 6051 ql_mbx_data_t mr;
6125 6052
6126 6053 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6127 6054
6128 6055 /* decouple the LED control from the fw */
6129 6056 rval = ql_get_firmware_option(ha, &mr);
6130 6057 if (rval != QL_SUCCESS) {
6131 6058 EL(ha, "failed, get_firmware_option=%xh\n", rval);
6132 6059 return (rval);
6133 6060 }
6134 6061
6135 6062 /* set the appropriate options */
6136 6063 mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
6137 6064
6138 6065 /* send it back to the firmware */
6139 6066 rval = ql_set_firmware_option(ha, &mr);
6140 6067 if (rval != QL_SUCCESS) {
6141 6068 EL(ha, "failed, set_firmware_option=%xh\n", rval);
6142 6069 return (rval);
6143 6070 }
6144 6071
6145 6072 /* initally, turn the LED's off */
6146 6073 ql_drive_led(ha, LED_ALL_OFF);
6147 6074
6148 6075 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6149 6076
6150 6077 return (rval);
6151 6078 }
6152 6079
6153 6080 /*
6154 6081 * ql_wrapup_led
6155 6082 * Return LED control to the firmware
6156 6083 *
6157 6084 * Input:
6158 6085 * ha: adapter state pointer.
6159 6086 *
6160 6087 * Context:
6161 6088 * Kernel/Interrupt context.
6162 6089 */
6163 6090 static uint32_t
6164 6091 ql_wrapup_led(ql_adapter_state_t *ha)
6165 6092 {
6166 6093 uint32_t rval;
6167 6094 ql_mbx_data_t mr;
6168 6095
6169 6096 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6170 6097
6171 6098 /* Turn all LED's off */
6172 6099 ql_drive_led(ha, LED_ALL_OFF);
6173 6100
6174 6101 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6175 6102
6176 6103 uint32_t gpio_data;
6177 6104
6178 6105 /* disable the LED update mask */
6179 6106 gpio_data = RD32_IO_REG(ha, gpiod);
6180 6107 gpio_data &= ~LED_MASK_UPDATE_24;
6181 6108
6182 6109 /* write out the data */
6183 6110 WRT32_IO_REG(ha, gpiod, gpio_data);
6184 6111 }
6185 6112
6186 6113 /* give LED control back to the f/w */
6187 6114 rval = ql_get_firmware_option(ha, &mr);
6188 6115 if (rval != QL_SUCCESS) {
6189 6116 EL(ha, "failed, get_firmware_option=%xh\n", rval);
6190 6117 return (rval);
6191 6118 }
6192 6119
6193 6120 mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6194 6121
6195 6122 rval = ql_set_firmware_option(ha, &mr);
6196 6123 if (rval != QL_SUCCESS) {
6197 6124 EL(ha, "failed, set_firmware_option=%xh\n", rval);
6198 6125 return (rval);
6199 6126 }
6200 6127
6201 6128 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6202 6129
6203 6130 return (rval);
6204 6131 }
6205 6132
/*
 * ql_get_port_summary
 *	Performs EXT_SC_GET_PORT_SUMMARY subcommand. of EXT_CC_GET_DATA.
 *
 *	The EXT_IOCTL->RequestAdr points to a single
 *	UINT32 which identifies the device type.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	Local EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DEVICEDATA		dd = {0};
	EXT_DEVICEDATA		*uddp;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint32_t		rlen, dev_type, index;
	int			rval = 0;
	EXT_DEVICEDATAENTRY	*uddep, *ddep;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* ddep is a kernel staging entry; each device is built here first. */
	ddep = &dd.EntryList[0];

	/*
	 * Get the type of device the requestor is looking for.
	 *
	 * We ignore this for now.
	 */
	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&dev_type, sizeof (dev_type), mode);
	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyin\n");
		return;
	}
	/*
	 * Count the number of entries to be returned. Count devices
	 * that are offlline, but have been persistently bound.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}
			dd.TotalDevices++;
		}
	}
	/*
	 * Compute the number of entries that can be returned
	 * based upon the size of caller's response buffer.
	 */
	dd.ReturnListEntryCount = 0;
	if (dd.TotalDevices == 0) {
		/* Header only: EXT_DEVICEDATA minus its one inline entry. */
		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
	} else {
		/* Header plus one entry per device (one is inline). */
		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
	}
	if (rlen > cmd->ResponseLen) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = rlen;
		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
		    rlen, cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = 0;
	/* uddp/uddep point into the USER buffer; only ddi_copyout writes. */
	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
	uddep = &uddp->EntryList[0];
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			if (tq->flags & TQF_INITIATOR_DEVICE ||
			    !VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;	/* Skip this one */
			}

			/* Build the entry in kernel space, then copy out. */
			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));

			bcopy(tq->node_name, ddep->NodeWWN, 8);
			bcopy(tq->port_name, ddep->PortWWN, 8);

			ddep->PortID[0] = tq->d_id.b.domain;
			ddep->PortID[1] = tq->d_id.b.area;
			ddep->PortID[2] = tq->d_id.b.al_pa;

			bcopy(tq->port_name,
			    (caddr_t)&ddep->TargetAddress.Target, 8);

			ddep->DeviceFlags = tq->flags;
			ddep->LoopID = tq->loop_id;
			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
			    ha->instance, ddep->TargetAddress.Target,
			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
			    ddep->NodeWWN[2], ddep->NodeWWN[3],
			    ddep->NodeWWN[4], ddep->NodeWWN[5],
			    ddep->NodeWWN[6], ddep->NodeWWN[7],
			    ddep->PortWWN[0], ddep->PortWWN[1],
			    ddep->PortWWN[2], ddep->PortWWN[3],
			    ddep->PortWWN[4], ddep->PortWWN[5],
			    ddep->PortWWN[6], ddep->PortWWN[7]);
			rval = ddi_copyout((void *)ddep, (void *)uddep,
			    sizeof (EXT_DEVICEDATAENTRY), mode);

			if (rval != 0) {
				/*
				 * NOTE(review): this break only exits the
				 * inner loop; the outer loop continues and
				 * the header copyout below still runs and
				 * can overwrite the error accounting --
				 * confirm whether that is intended.
				 */
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				EL(ha, "failed, ddi_copyout\n");
				break;
			}
			dd.ReturnListEntryCount++;
			uddep++;
			cmd->ResponseLen += (uint32_t)
			    sizeof (EXT_DEVICEDATAENTRY);
		}
	}
	/* Copy out the header (counts) without the inline entry. */
	rval = ddi_copyout((void *)&dd, (void *)uddp,
	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);

	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout-2\n");
	} else {
		/*
		 * NOTE(review): a full entry size is added here for a
		 * header that is sizeof (EXT_DEVICEDATA) minus one entry;
		 * ResponseLen looks overstated unless the two sizes happen
		 * to match -- verify against the consumer of this ioctl.
		 */
		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
6351 6278
6352 6279 /*
6353 6280 * ql_get_target_id
6354 6281 * Performs EXT_SC_GET_TARGET_ID subcommand. of EXT_CC_GET_DATA.
6355 6282 *
6356 6283 * Input:
6357 6284 * ha: adapter state pointer.
6358 6285 * cmd: Local EXT_IOCTL cmd struct pointer.
6359 6286 * mode: flags.
6360 6287 *
6361 6288 * Returns:
6362 6289 * None, request status indicated in cmd->Status.
6363 6290 *
6364 6291 * Context:
6365 6292 * Kernel context.
6366 6293 */
6367 6294 static void
6368 6295 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6369 6296 {
6370 6297 uint32_t rval;
6371 6298 uint16_t qlnt;
6372 6299 EXT_DEST_ADDR extdestaddr = {0};
6373 6300 uint8_t *name;
6374 6301 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE];
6375 6302 ql_tgt_t *tq;
6376 6303
6377 6304 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6378 6305
6379 6306 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6380 6307 (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6381 6308 EL(ha, "failed, ddi_copyin\n");
6382 6309 cmd->Status = EXT_STATUS_COPY_ERR;
6383 6310 cmd->ResponseLen = 0;
6384 6311 return;
6385 6312 }
6386 6313
6387 6314 qlnt = QLNT_PORT;
6388 6315 name = wwpn;
6389 6316 QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6390 6317 ha->instance, name[0], name[1], name[2], name[3], name[4],
6391 6318 name[5], name[6], name[7]);
6392 6319
6393 6320 tq = ql_find_port(ha, name, qlnt);
6394 6321 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6395 6322 EL(ha, "failed, fc_port not found\n");
6396 6323 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6397 6324 cmd->ResponseLen = 0;
6398 6325 return;
6399 6326 }
6400 6327
6401 6328 bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6402 6329
6403 6330 rval = ddi_copyout((void *)&extdestaddr,
6404 6331 (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6405 6332 if (rval != 0) {
6406 6333 EL(ha, "failed, ddi_copyout\n");
6407 6334 cmd->Status = EXT_STATUS_COPY_ERR;
6408 6335 cmd->ResponseLen = 0;
6409 6336 }
6410 6337
6411 6338 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6412 6339 }
6413 6340
/*
 * ql_setup_fcache
 *	Populates selected flash sections into the cache
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 *
 * Note:
 *	Driver must be in stalled state prior to entering or
 *	add code to this function prior to calling ql_setup_flash()
 */
int
ql_setup_fcache(ql_adapter_state_t *ha)
{
	int		rval;
	uint32_t	freadpos = 0;
	uint32_t	fw_done = 0;
	ql_fcache_t	*head = NULL;
	ql_fcache_t	*tail = NULL;
	ql_fcache_t	*ftmp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* The cache lock is held for the entire build. */
	CACHE_LOCK(ha);

	/* If we already have populated it, rtn */
	if (ha->fcache != NULL) {
		CACHE_UNLOCK(ha);
		EL(ha, "buffer already populated\n");
		return (QL_SUCCESS);
	}

	ql_flash_nvram_defaults(ha);

	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
		CACHE_UNLOCK(ha);
		EL(ha, "unable to setup flash; rval=%xh\n", rval);
		return (rval);
	}

	/*
	 * Walk the flash section by section; ql_check_pci() advances
	 * freadpos and presumably sets 0xffffffff after the final
	 * section -- the sentinel terminates this loop.
	 */
	while (freadpos != 0xffffffff) {
		/* Allocate & populate this node */
		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
			EL(ha, "node alloc failed\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* link in the new node */
		if (head == NULL) {
			head = tail = ftmp;
		} else {
			tail->next = ftmp;
			tail = ftmp;
		}

		/* Do the firmware node first for 24xx/25xx's */
		if (fw_done == 0) {
			if (CFG_IST(ha, CFG_CTRL_24258081)) {
				freadpos = ha->flash_fw_addr << 2;
			}
			fw_done = 1;
		}

		/* Read FBUFSIZE bytes of this section into the node. */
		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
		    freadpos)) != QL_SUCCESS) {
			EL(ha, "failed, 24xx dump_fcode"
			    " pos=%xh rval=%xh\n", freadpos, rval);
			rval = QL_FUNCTION_FAILED;
			break;
		}

		/* checkout the pci data / format */
		if (ql_check_pci(ha, ftmp, &freadpos)) {
			EL(ha, "flash header incorrect\n");
			rval = QL_FUNCTION_FAILED;
			break;
		}
	}

	if (rval != QL_SUCCESS) {
		/* release all resources we have */
		ftmp = head;
		while (ftmp != NULL) {
			tail = ftmp->next;
			kmem_free(ftmp->buf, FBUFSIZE);
			kmem_free(ftmp, sizeof (ql_fcache_t));
			ftmp = tail;
		}

		EL(ha, "failed, done\n");
	} else {
		/* Publish the completed list while still under the lock. */
		ha->fcache = head;
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	CACHE_UNLOCK(ha);

	return (rval);
}
6519 6446
6520 6447 /*
6521 6448 * ql_update_fcache
6522 6449 * re-populates updated flash into the fcache. If
6523 6450 * fcache does not exist (e.g., flash was empty/invalid on
6524 6451 * boot), this routine will create and the populate it.
6525 6452 *
6526 6453 * Input:
6527 6454 * ha = adapter state pointer.
6528 6455 * *bpf = Pointer to flash buffer.
6529 6456 * bsize = Size of flash buffer.
6530 6457 *
6531 6458 * Returns:
6532 6459 *
6533 6460 * Context:
6534 6461 * Kernel context.
6535 6462 */
6536 6463 void
6537 6464 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6538 6465 {
6539 6466 int rval = QL_SUCCESS;
6540 6467 uint32_t freadpos = 0;
6541 6468 uint32_t fw_done = 0;
6542 6469 ql_fcache_t *head = NULL;
6543 6470 ql_fcache_t *tail = NULL;
6544 6471 ql_fcache_t *ftmp;
6545 6472
6546 6473 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6547 6474
6548 6475 while (freadpos != 0xffffffff) {
6549 6476
6550 6477 /* Allocate & populate this node */
6551 6478
6552 6479 if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6553 6480 EL(ha, "node alloc failed\n");
6554 6481 rval = QL_FUNCTION_FAILED;
6555 6482 break;
6556 6483 }
6557 6484
6558 6485 /* link in the new node */
6559 6486 if (head == NULL) {
6560 6487 head = tail = ftmp;
6561 6488 } else {
6562 6489 tail->next = ftmp;
6563 6490 tail = ftmp;
6564 6491 }
6565 6492
6566 6493 /* Do the firmware node first for 24xx's */
6567 6494 if (fw_done == 0) {
6568 6495 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6569 6496 freadpos = ha->flash_fw_addr << 2;
6570 6497 }
6571 6498 fw_done = 1;
6572 6499 }
6573 6500
6574 6501 /* read in first FBUFSIZE bytes of this flash section */
6575 6502 if (freadpos+FBUFSIZE > bsize) {
6576 6503 EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6577 6504 freadpos, bsize);
6578 6505 rval = QL_FUNCTION_FAILED;
6579 6506 break;
6580 6507 }
6581 6508 bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);
6582 6509
6583 6510 /* checkout the pci data / format */
6584 6511 if (ql_check_pci(ha, ftmp, &freadpos)) {
6585 6512 EL(ha, "flash header incorrect\n");
6586 6513 rval = QL_FUNCTION_FAILED;
6587 6514 break;
6588 6515 }
6589 6516 }
6590 6517
6591 6518 if (rval != QL_SUCCESS) {
6592 6519 /*
6593 6520 * release all resources we have
6594 6521 */
6595 6522 ql_fcache_rel(head);
6596 6523 EL(ha, "failed, done\n");
6597 6524 } else {
6598 6525 /*
6599 6526 * Release previous fcache resources and update with new
6600 6527 */
6601 6528 CACHE_LOCK(ha);
6602 6529 ql_fcache_rel(ha->fcache);
6603 6530 ha->fcache = head;
6604 6531 CACHE_UNLOCK(ha);
6605 6532
6606 6533 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6607 6534 }
6608 6535 }
6609 6536
6610 6537 /*
6611 6538 * ql_setup_fnode
6612 6539 * Allocates fcache node
6613 6540 *
6614 6541 * Input:
6615 6542 * ha = adapter state pointer.
6616 6543 * node = point to allocated fcache node (NULL = failed)
6617 6544 *
6618 6545 * Returns:
6619 6546 *
6620 6547 * Context:
6621 6548 * Kernel context.
6622 6549 *
6623 6550 * Note:
6624 6551 * Driver must be in stalled state prior to entering or
6625 6552 * add code to this function prior to calling ql_setup_flash()
6626 6553 */
6627 6554 static ql_fcache_t *
6628 6555 ql_setup_fnode(ql_adapter_state_t *ha)
6629 6556 {
6630 6557 ql_fcache_t *fnode = NULL;
6631 6558
6632 6559 if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6633 6560 KM_SLEEP))) == NULL) {
6634 6561 EL(ha, "fnode alloc failed\n");
6635 6562 fnode = NULL;
6636 6563 } else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6637 6564 KM_SLEEP))) == NULL) {
6638 6565 EL(ha, "buf alloc failed\n");
6639 6566 kmem_free(fnode, sizeof (ql_fcache_t));
6640 6567 fnode = NULL;
6641 6568 } else {
6642 6569 fnode->buflen = FBUFSIZE;
6643 6570 }
6644 6571
6645 6572 return (fnode);
6646 6573 }
6647 6574
6648 6575 /*
6649 6576 * ql_fcache_rel
6650 6577 * Releases the fcache resources
6651 6578 *
6652 6579 * Input:
6653 6580 * ha = adapter state pointer.
6654 6581 * head = Pointer to fcache linked list
6655 6582 *
6656 6583 * Returns:
6657 6584 *
6658 6585 * Context:
6659 6586 * Kernel context.
6660 6587 *
6661 6588 */
6662 6589 void
6663 6590 ql_fcache_rel(ql_fcache_t *head)
6664 6591 {
6665 6592 ql_fcache_t *ftmp = head;
6666 6593 ql_fcache_t *tail;
6667 6594
6668 6595 /* release all resources we have */
6669 6596 while (ftmp != NULL) {
6670 6597 tail = ftmp->next;
6671 6598 kmem_free(ftmp->buf, FBUFSIZE);
6672 6599 kmem_free(ftmp, sizeof (ql_fcache_t));
6673 6600 ftmp = tail;
6674 6601 }
6675 6602 }
6676 6603
6677 6604 /*
6678 6605 * ql_update_flash_caches
6679 6606 * Updates driver flash caches
6680 6607 *
6681 6608 * Input:
6682 6609 * ha: adapter state pointer.
6683 6610 *
6684 6611 * Context:
6685 6612 * Kernel context.
6686 6613 */
static void
ql_update_flash_caches(ql_adapter_state_t *ha)
{
	uint32_t	len;
	ql_link_t	*link;
	ql_adapter_state_t	*ha2;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Get base path length: scan devpath backwards for the function
	 * separator (',' or '@') so only the base device path is compared
	 * below.  NOTE(review): if neither character is found, len reaches
	 * 0 and the strncmp() below matches every instance — presumably
	 * devpath always contains '@'; confirm.
	 */
	for (len = (uint32_t)strlen(ha->devpath); len; len--) {
		if (ha->devpath[len] == ',' ||
		    ha->devpath[len] == '@') {
			break;
		}
	}

	/*
	 * Reset fcache on all adapter instances that share this
	 * adapter's base device path (i.e. all functions of the same
	 * physical HBA, which share one flash part).
	 */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha2 = link->base_address;

		if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
			continue;
		}

		/* Drop the sibling's cached flash images under its lock. */
		CACHE_LOCK(ha2);
		ql_fcache_rel(ha2->fcache);
		ha2->fcache = NULL;

		/* 24xx/25xx/81xx parts also cache VPD; invalidate it too. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			if (ha2->vcache != NULL) {
				kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
				ha2->vcache = NULL;
			}
		}
		CACHE_UNLOCK(ha2);

		/* Rebuild the cache from the (updated) flash contents. */
		(void) ql_setup_fcache(ha2);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6729 6656
6730 6657 /*
6731 6658 * ql_get_fbuf
6732 6659 * Search the fcache list for the type specified
6733 6660 *
6734 6661 * Input:
6735 6662 * fptr = Pointer to fcache linked list
6736 6663 * ftype = Type of image to be returned.
6737 6664 *
6738 6665 * Returns:
6739 6666 * Pointer to ql_fcache_t.
6740 6667 * NULL means not found.
6741 6668 *
6742 6669 * Context:
6743 6670 * Kernel context.
6744 6671 *
6745 6672 *
6746 6673 */
6747 6674 ql_fcache_t *
6748 6675 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
6749 6676 {
6750 6677 while (fptr != NULL) {
6751 6678 /* does this image meet criteria? */
6752 6679 if (ftype & fptr->type) {
6753 6680 break;
6754 6681 }
6755 6682 fptr = fptr->next;
6756 6683 }
6757 6684 return (fptr);
6758 6685 }
6759 6686
6760 6687 /*
6761 6688 * ql_check_pci
6762 6689 *
6763 6690 * checks the passed buffer for a valid pci signature and
6764 6691 * expected (and in range) pci length values.
6765 6692 *
6766 6693 * For firmware type, a pci header is added since the image in
6767 6694 * the flash does not have one (!!!).
6768 6695 *
6769 6696 * On successful pci check, nextpos adjusted to next pci header.
6770 6697 *
6771 6698 * Returns:
6772 6699 * -1 --> last pci image
6773 6700 * 0 --> pci header valid
6774 6701 * 1 --> pci header invalid.
6775 6702 *
6776 6703 * Context:
6777 6704 * Kernel context.
6778 6705 */
6779 6706 static int
6780 6707 ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
6781 6708 {
6782 6709 pci_header_t *pcih;
6783 6710 pci_data_t *pcid;
6784 6711 uint32_t doff;
6785 6712 uint8_t *pciinfo;
6786 6713
6787 6714 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6788 6715
6789 6716 if (fcache != NULL) {
6790 6717 pciinfo = fcache->buf;
6791 6718 } else {
6792 6719 EL(ha, "failed, null fcache ptr passed\n");
6793 6720 return (1);
6794 6721 }
6795 6722
6796 6723 if (pciinfo == NULL) {
6797 6724 EL(ha, "failed, null pciinfo ptr passed\n");
6798 6725 return (1);
6799 6726 }
6800 6727
6801 6728 if (CFG_IST(ha, CFG_SBUS_CARD)) {
6802 6729 caddr_t bufp;
6803 6730 uint_t len;
6804 6731
6805 6732 if (pciinfo[0] != SBUS_CODE_FCODE) {
6806 6733 EL(ha, "failed, unable to detect sbus fcode\n");
6807 6734 return (1);
6808 6735 }
6809 6736 fcache->type = FTYPE_FCODE;
6810 6737
6811 6738 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
6812 6739 if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
6813 6740 PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
6814 6741 DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
6815 6742 (int *)&len) == DDI_PROP_SUCCESS) {
6816 6743
6817 6744 (void) snprintf(fcache->verstr,
6818 6745 FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
6819 6746 kmem_free(bufp, len);
6820 6747 }
6821 6748
6822 6749 *nextpos = 0xffffffff;
6823 6750
6824 6751 QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
6825 6752 ha->instance);
6826 6753
6827 6754 return (0);
6828 6755 }
6829 6756
6830 6757 if (*nextpos == ha->flash_fw_addr << 2) {
6831 6758
6832 6759 pci_header_t fwh = {0};
6833 6760 pci_data_t fwd = {0};
6834 6761 uint8_t *buf, *bufp;
6835 6762
6836 6763 /*
6837 6764 * Build a pci header for the firmware module
6838 6765 */
6839 6766 if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
6840 6767 NULL) {
6841 6768 EL(ha, "failed, unable to allocate buffer\n");
6842 6769 return (1);
6843 6770 }
6844 6771
6845 6772 fwh.signature[0] = PCI_HEADER0;
6846 6773 fwh.signature[1] = PCI_HEADER1;
6847 6774 fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
6848 6775 fwh.dataoffset[1] = MSB(sizeof (pci_header_t));
6849 6776
6850 6777 fwd.signature[0] = 'P';
6851 6778 fwd.signature[1] = 'C';
6852 6779 fwd.signature[2] = 'I';
6853 6780 fwd.signature[3] = 'R';
6854 6781 fwd.codetype = PCI_CODE_FW;
6855 6782 fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
6856 6783 fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));
6857 6784
6858 6785 bufp = buf;
6859 6786 bcopy(&fwh, bufp, sizeof (pci_header_t));
6860 6787 bufp += sizeof (pci_header_t);
6861 6788 bcopy(&fwd, bufp, sizeof (pci_data_t));
6862 6789 bufp += sizeof (pci_data_t);
6863 6790
6864 6791 bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
6865 6792 sizeof (pci_data_t)));
6866 6793 bcopy(buf, fcache->buf, FBUFSIZE);
6867 6794
6868 6795 fcache->type = FTYPE_FW;
6869 6796
6870 6797 (void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6871 6798 "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
6872 6799 fcache->buf[27]);
6873 6800
6874 6801 if (CFG_IST(ha, CFG_CTRL_81XX)) {
6875 6802 *nextpos = 0x200000;
6876 6803 } else if (CFG_IST(ha, CFG_CTRL_8021)) {
6877 6804 *nextpos = 0x80000;
6878 6805 } else {
6879 6806 *nextpos = 0;
6880 6807 }
6881 6808 kmem_free(buf, FBUFSIZE);
6882 6809
6883 6810 QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);
6884 6811
6885 6812 return (0);
6886 6813 }
6887 6814
6888 6815 /* get to the pci header image length */
6889 6816 pcih = (pci_header_t *)pciinfo;
6890 6817
6891 6818 doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);
6892 6819
6893 6820 /* some header section sanity check */
6894 6821 if (pcih->signature[0] != PCI_HEADER0 ||
6895 6822 pcih->signature[1] != PCI_HEADER1 || doff > 50) {
6896 6823 EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
6897 6824 pcih->signature[0], pcih->signature[1], doff);
6898 6825 return (1);
6899 6826 }
6900 6827
6901 6828 pcid = (pci_data_t *)(pciinfo + doff);
6902 6829
6903 6830 /* a slight sanity data section check */
6904 6831 if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
6905 6832 pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
6906 6833 EL(ha, "failed, data sig mismatch!\n");
6907 6834 return (1);
6908 6835 }
6909 6836
6910 6837 if (pcid->indicator == PCI_IND_LAST_IMAGE) {
6911 6838 QL_PRINT_9(CE_CONT, "(%d): last image\n", ha->instance);
6912 6839 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6913 6840 ql_flash_layout_table(ha, *nextpos +
6914 6841 (pcid->imagelength[0] | (pcid->imagelength[1] <<
6915 6842 8)) * PCI_SECTOR_SIZE);
6916 6843 (void) ql_24xx_flash_desc(ha);
6917 6844 }
6918 6845 *nextpos = 0xffffffff;
6919 6846 } else {
6920 6847 /* adjust the next flash read start position */
6921 6848 *nextpos += (pcid->imagelength[0] |
6922 6849 (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
6923 6850 }
6924 6851
6925 6852 switch (pcid->codetype) {
6926 6853 case PCI_CODE_X86PC:
6927 6854 fcache->type = FTYPE_BIOS;
6928 6855 break;
6929 6856 case PCI_CODE_FCODE:
6930 6857 fcache->type = FTYPE_FCODE;
6931 6858 break;
6932 6859 case PCI_CODE_EFI:
6933 6860 fcache->type = FTYPE_EFI;
6934 6861 break;
6935 6862 case PCI_CODE_HPPA:
6936 6863 fcache->type = FTYPE_HPPA;
6937 6864 break;
6938 6865 default:
6939 6866 fcache->type = FTYPE_UNKNOWN;
6940 6867 break;
6941 6868 }
6942 6869
6943 6870 (void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6944 6871 "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);
6945 6872
6946 6873 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6947 6874
6948 6875 return (0);
6949 6876 }
6950 6877
6951 6878 /*
6952 6879 * ql_flash_layout_table
6953 6880 * Obtains flash addresses from table
6954 6881 *
6955 6882 * Input:
6956 6883 * ha: adapter state pointer.
6957 6884 * flt_paddr: flash layout pointer address.
6958 6885 *
6959 6886 * Context:
6960 6887 * Kernel context.
6961 6888 */
static void
ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
{
	ql_flt_ptr_t	*fptr;
	uint8_t		*bp;
	int		rval;
	uint32_t	len, faddr, cnt;
	uint16_t	chksum, w16;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Process flash layout table header */
	len = sizeof (ql_flt_ptr_t);
	/* KM_SLEEP allocation cannot fail. */
	bp = kmem_zalloc(len, KM_SLEEP);

	/* Process pointer to flash layout table */
	if ((rval = ql_dump_fcode(ha, bp, len, flt_paddr)) != QL_SUCCESS) {
		EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
		    rval);
		kmem_free(bp, len);
		return;
	}
	fptr = (ql_flt_ptr_t *)bp;

	/*
	 * Verify pointer to flash layout table: the 16-bit word sum over
	 * the structure must be zero and the signature must read "QFLT".
	 */
	for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
		w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
		chksum += w16;
	}
	if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
	    fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
		EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0],
		    fptr->sig[1], fptr->sig[2], fptr->sig[3]);
		kmem_free(bp, len);
		return;
	}
	/* Assemble the little-endian table address from its byte fields. */
	faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
	    fptr->addr[3]);

	kmem_free(bp, len);

	/* Parse the table itself at the address the pointer gave us. */
	ql_process_flt(ha, faddr);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
7010 6934
7011 6935 /*
7012 6936 * ql_process_flt
7013 6937 * Obtains flash addresses from flash layout table
7014 6938 *
7015 6939 * Input:
7016 6940 * ha: adapter state pointer.
7017 6941 * faddr: flash layout table byte address.
7018 6942 *
7019 6943 * Context:
7020 6944 * Kernel context.
7021 6945 */
static void
ql_process_flt(ql_adapter_state_t *ha, uint32_t faddr)
{
	ql_flt_hdr_t	*fhdr;
	ql_flt_region_t	*frgn;
	uint8_t		*bp, *eaddr, nv_rg, vpd_rg;
	int		rval;
	uint32_t	len, cnt, fe_addr;
	uint16_t	chksum, w16;

	QL_PRINT_9(CE_CONT, "(%d): started faddr=%xh\n", ha->instance, faddr);

	/* Process flash layout table header */
	/* KM_SLEEP allocation cannot fail. */
	bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP);
	fhdr = (ql_flt_hdr_t *)bp;

	/* Process flash layout table. */
	if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
	    QL_SUCCESS) {
		EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
		return;
	}

	/*
	 * Verify flash layout table: total length must fit the buffer,
	 * the 16-bit word sum must be zero, and the version must be 1.
	 */
	len = (uint32_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
	    sizeof (ql_flt_hdr_t) + sizeof (ql_flt_region_t));
	if (len > FLASH_LAYOUT_TABLE_SIZE) {
		/* Oversized table: force the checksum test to fail. */
		chksum = 0xffff;
	} else {
		for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
			w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
			chksum += w16;
		}
	}
	w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
	if (chksum != 0 || w16 != 1) {
		EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
		return;
	}
	/* eaddr marks the end of the region entries. */
	eaddr = bp + len;

	/*
	 * Process Function/Port Configuration Map (8021 only): it follows
	 * the layout table and selects which NVRAM/VPD region belongs to
	 * this PCI function.
	 */
	nv_rg = vpd_rg = 0;
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		uint16_t	i;
		uint8_t		*mbp = eaddr;
		ql_fp_cfg_map_t	*cmp = (ql_fp_cfg_map_t *)mbp;

		len = (uint32_t)(CHAR_TO_SHORT(cmp->hdr.len[0],
		    cmp->hdr.len[1]));
		if (len > FLASH_LAYOUT_TABLE_SIZE) {
			chksum = 0xffff;
		} else {
			for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
				w16 = (uint16_t)CHAR_TO_SHORT(mbp[cnt],
				    mbp[cnt + 1]);
				chksum += w16;
			}
		}
		w16 = CHAR_TO_SHORT(cmp->hdr.version[0], cmp->hdr.version[1]);
		/* Map must checksum to zero, be version 1, and read "FPCM". */
		if (chksum != 0 || w16 != 1 ||
		    cmp->hdr.Signature[0] != 'F' ||
		    cmp->hdr.Signature[1] != 'P' ||
		    cmp->hdr.Signature[2] != 'C' ||
		    cmp->hdr.Signature[3] != 'M') {
			EL(ha, "cfg_map chksum=%xh, version=%d, "
			    "sig=%c%c%c%c\n", chksum, w16,
			    cmp->hdr.Signature[0], cmp->hdr.Signature[1],
			    cmp->hdr.Signature[2], cmp->hdr.Signature[3]);
		} else {
			cnt = (uint16_t)
			    (CHAR_TO_SHORT(cmp->hdr.NumberEntries[0],
			    cmp->hdr.NumberEntries[1]));
			/* Locate entry for function. */
			for (i = 0; i < cnt; i++) {
				if (cmp->cfg[i].FunctionType == FT_FC &&
				    cmp->cfg[i].FunctionNumber[0] ==
				    ha->function_number &&
				    cmp->cfg[i].FunctionNumber[1] == 0) {
					nv_rg = cmp->cfg[i].ConfigRegion;
					vpd_rg = cmp->cfg[i].VpdRegion;
					break;
				}
			}

			/* Both regions must be assigned or neither is used. */
			if (nv_rg == 0 || vpd_rg == 0) {
				EL(ha, "cfg_map nv_rg=%d, vpd_rg=%d\n", nv_rg,
				    vpd_rg);
				nv_rg = vpd_rg = 0;
			}
		}
	}

	/* Process flash layout table regions */
	for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
	    (uint8_t *)frgn < eaddr; frgn++) {
		faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
		    frgn->beg_addr[2], frgn->beg_addr[3]);
		/* Table holds byte addresses; driver uses word addresses. */
		faddr >>= 2;
		fe_addr = CHAR_TO_LONG(frgn->end_addr[0], frgn->end_addr[1],
		    frgn->end_addr[2], frgn->end_addr[3]);
		fe_addr >>= 2;

		switch (frgn->region) {
		case FLASH_8021_BOOTLOADER_REGION:
			ha->bootloader_addr = faddr;
			ha->bootloader_size = (fe_addr - faddr) + 1;
			QL_PRINT_9(CE_CONT, "(%d): bootloader_addr=%xh, "
			    "size=%xh\n", ha->instance, faddr,
			    ha->bootloader_size);
			break;
		case FLASH_FW_REGION:
		case FLASH_8021_FW_REGION:
			ha->flash_fw_addr = faddr;
			ha->flash_fw_size = (fe_addr - faddr) + 1;
			QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh, "
			    "size=%xh\n", ha->instance, faddr,
			    ha->flash_fw_size);
			break;
		case FLASH_GOLDEN_FW_REGION:
		case FLASH_8021_GOLDEN_FW_REGION:
			ha->flash_golden_fw_addr = faddr;
			QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n",
			    ha->instance, faddr);
			break;
		case FLASH_8021_VPD_REGION:
			/* Accept only if the cfg map agrees (or is absent). */
			if (!vpd_rg || vpd_rg == FLASH_8021_VPD_REGION) {
				ha->flash_vpd_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): 8021_flash_vpd_"
				    "addr=%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_VPD_0_REGION:
			/*
			 * With a cfg map, honor its VPD assignment; otherwise
			 * region 0 belongs to function 0 (non-8021 parts).
			 */
			if (vpd_rg) {
				if (vpd_rg == FLASH_VPD_0_REGION) {
					ha->flash_vpd_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): vpd_rg "
					    "flash_vpd_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (!(ha->flags & FUNCTION_1) &&
			    !(CFG_IST(ha, CFG_CTRL_8021))) {
				ha->flash_vpd_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
				    "\n", ha->instance, faddr);
			}
			break;
		case FLASH_NVRAM_0_REGION:
			/* cfg-map NVRAM region 0 implies we are function 0. */
			if (nv_rg) {
				if (nv_rg == FLASH_NVRAM_0_REGION) {
					ADAPTER_STATE_LOCK(ha);
					ha->flags &= ~FUNCTION_1;
					ADAPTER_STATE_UNLOCK(ha);
					ha->flash_nvram_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): nv_rg "
					    "flash_nvram_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (!(ha->flags & FUNCTION_1)) {
				ha->flash_nvram_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_VPD_1_REGION:
			if (vpd_rg) {
				if (vpd_rg == FLASH_VPD_1_REGION) {
					ha->flash_vpd_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): vpd_rg "
					    "flash_vpd_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (ha->flags & FUNCTION_1 &&
			    !(CFG_IST(ha, CFG_CTRL_8021))) {
				ha->flash_vpd_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
				    "\n", ha->instance, faddr);
			}
			break;
		case FLASH_NVRAM_1_REGION:
			/* cfg-map NVRAM region 1 implies we are function 1. */
			if (nv_rg) {
				if (nv_rg == FLASH_NVRAM_1_REGION) {
					ADAPTER_STATE_LOCK(ha);
					ha->flags |= FUNCTION_1;
					ADAPTER_STATE_UNLOCK(ha);
					ha->flash_nvram_addr = faddr;
					QL_PRINT_9(CE_CONT, "(%d): nv_rg "
					    "flash_nvram_addr=%xh\n",
					    ha->instance, faddr);
				}
			} else if (ha->flags & FUNCTION_1) {
				ha->flash_nvram_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_DESC_TABLE_REGION:
			if (!(CFG_IST(ha, CFG_CTRL_8021))) {
				ha->flash_desc_addr = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_ERROR_LOG_0_REGION:
			if (!(ha->flags & FUNCTION_1)) {
				ha->flash_errlog_start = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		case FLASH_ERROR_LOG_1_REGION:
			if (ha->flags & FUNCTION_1) {
				ha->flash_errlog_start = faddr;
				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
				    "%xh\n", ha->instance, faddr);
			}
			break;
		default:
			/* Unrecognized regions are silently skipped. */
			break;
		}
	}
	kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
7252 7173
7253 7174 /*
7254 7175 * ql_flash_nvram_defaults
7255 7176 * Flash default addresses.
7256 7177 *
7257 7178 * Input:
7258 7179 * ha: adapter state pointer.
7259 7180 *
 * Returns:
7262 7183 *
7263 7184 * Context:
7264 7185 * Kernel context.
7265 7186 */
static void
ql_flash_nvram_defaults(ql_adapter_state_t *ha)
{
	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Assign per-chip default flash addresses (NVRAM, VPD, error log,
	 * descriptor table, firmware) keyed by controller family, with a
	 * separate address set for PCI function 1 vs function 0.
	 */
	if (ha->flags & FUNCTION_1) {
		/* Function 1 address set. */
		if (CFG_IST(ha, CFG_CTRL_2300)) {
			ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
			/* 8021: no data window/error log; has a bootloader. */
			ha->flash_data_addr = 0;
			ha->flash_nvram_addr = NVRAM_8021_FUNC1_ADDR;
			ha->flash_vpd_addr = VPD_8021_FUNC1_ADDR;
			ha->flash_errlog_start = 0;
			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
		}
		/*
		 * NOTE(review): unlike the function-0 branch below, an
		 * unrecognized device falls through here without an EL()
		 * diagnostic — confirm whether that is intentional.
		 */
	} else {
		/* Function 0 address set. */
		if (CFG_IST(ha, CFG_CTRL_2200)) {
			ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
			ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_2300) ||
		    (CFG_IST(ha, CFG_CTRL_6322))) {
			ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
			ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
			ha->flash_data_addr = 0;
			ha->flash_nvram_addr = NVRAM_8021_FUNC0_ADDR;
			ha->flash_vpd_addr = VPD_8021_FUNC0_ADDR;
			ha->flash_errlog_start = 0;
			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
		} else {
			EL(ha, "unassigned flash fn0 addr: %x\n",
			    ha->device_id);
		}
	}
	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
7353 7274
7354 7275 /*
7355 7276 * ql_get_sfp
7356 7277 * Returns sfp data to sdmapi caller
7357 7278 *
7358 7279 * Input:
7359 7280 * ha: adapter state pointer.
7360 7281 * cmd: Local EXT_IOCTL cmd struct pointer.
7361 7282 * mode: flags.
7362 7283 *
7363 7284 * Returns:
7364 7285 * None, request status indicated in cmd->Status.
7365 7286 *
7366 7287 * Context:
7367 7288 * Kernel context.
7368 7289 */
7369 7290 static void
7370 7291 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7371 7292 {
7372 7293 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7373 7294
7374 7295 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
7375 7296 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7376 7297 EL(ha, "failed, invalid request for HBA\n");
7377 7298 return;
7378 7299 }
7379 7300
7380 7301 if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7381 7302 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7382 7303 cmd->DetailStatus = QL_24XX_SFP_SIZE;
7383 7304 EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7384 7305 cmd->ResponseLen);
7385 7306 return;
7386 7307 }
7387 7308
7388 7309 /* Dump SFP data in user buffer */
7389 7310 if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7390 7311 mode)) != 0) {
7391 7312 cmd->Status = EXT_STATUS_COPY_ERR;
7392 7313 EL(ha, "failed, copy error\n");
7393 7314 } else {
7394 7315 cmd->Status = EXT_STATUS_OK;
7395 7316 }
7396 7317
7397 7318 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7398 7319 }
7399 7320
7400 7321 /*
7401 7322 * ql_dump_sfp
7402 7323 * Dumps SFP.
7403 7324 *
7404 7325 * Input:
7405 7326 * ha: adapter state pointer.
7406 7327 * bp: buffer address.
7407 7328 * mode: flags
7408 7329 *
7409 7330 * Returns:
7410 7331 *
7411 7332 * Context:
7412 7333 * Kernel context.
7413 7334 */
static int
ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
{
	dma_mem_t	mem;
	uint32_t	cnt;
	int		rval2, rval = 0;
	uint32_t	dxfer;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Get memory for SFP. */
	/* 64-byte DMA scratch buffer; SFP is read in 64-byte chunks. */
	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
		return (ENOMEM);
	}

	/*
	 * Read the SFP chunk by chunk.  The first 256 bytes are fetched
	 * from device address 0xA0, the remainder from 0xA2 — presumably
	 * the two SFF-8472 pages; NOTE(review): confirm against the
	 * firmware's read-sfp mailbox spec.
	 */
	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
		rval2 = ql_read_sfp(ha, &mem,
		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
		    (uint16_t)(cnt & 0xff));
		if (rval2 != QL_SUCCESS) {
			EL(ha, "failed, read_sfp=%xh\n", rval2);
			rval = EFAULT;
			break;
		}

		/* copy the data back (to user or kernel space per mode) */
		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
		    mode)) != mem.size) {
			/* ddi copy error */
			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
			rval = EFAULT;
			break;
		}

		/* adjust the buffer pointer */
		bp = (caddr_t)bp + mem.size;
	}

	/* Release the DMA scratch buffer on all paths. */
	ql_free_phys(ha, &mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7461 7382
7462 7383 /*
7463 7384 * ql_port_param
7464 7385 * Retrieves or sets the firmware port speed settings
7465 7386 *
7466 7387 * Input:
7467 7388 * ha: adapter state pointer.
7468 7389 * cmd: Local EXT_IOCTL cmd struct pointer.
7469 7390 * mode: flags.
7470 7391 *
7471 7392 * Returns:
7472 7393 * None, request status indicated in cmd->Status.
7473 7394 *
7474 7395 * Context:
7475 7396 * Kernel context.
7476 7397 *
7477 7398 */
7478 7399 static void
7479 7400 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7480 7401 {
7481 7402 uint8_t *name;
7482 7403 ql_tgt_t *tq;
7483 7404 EXT_PORT_PARAM port_param = {0};
7484 7405 uint32_t rval = QL_SUCCESS;
7485 7406 uint32_t idma_rate;
7486 7407
7487 7408 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7488 7409
7489 7410 if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7490 7411 EL(ha, "invalid request for this HBA\n");
7491 7412 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7492 7413 cmd->ResponseLen = 0;
7493 7414 return;
7494 7415 }
7495 7416
7496 7417 if (LOOP_NOT_READY(ha)) {
7497 7418 EL(ha, "failed, loop not ready\n");
7498 7419 cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
7499 7420 cmd->ResponseLen = 0;
7500 7421 return;
7501 7422 }
7502 7423
7503 7424 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7504 7425 (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
7505 7426 EL(ha, "failed, ddi_copyin\n");
7506 7427 cmd->Status = EXT_STATUS_COPY_ERR;
7507 7428 cmd->ResponseLen = 0;
7508 7429 return;
7509 7430 }
7510 7431
7511 7432 if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
7512 7433 EL(ha, "Unsupported dest lookup type: %xh\n",
7513 7434 port_param.FCScsiAddr.DestType);
7514 7435 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7515 7436 cmd->ResponseLen = 0;
7516 7437 return;
7517 7438 }
7518 7439
7519 7440 name = port_param.FCScsiAddr.DestAddr.WWPN;
7520 7441
7521 7442 QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
7522 7443 ha->instance, name[0], name[1], name[2], name[3], name[4],
7523 7444 name[5], name[6], name[7]);
7524 7445
7525 7446 tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
7526 7447 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
7527 7448 EL(ha, "failed, fc_port not found\n");
7528 7449 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7529 7450 cmd->ResponseLen = 0;
7530 7451 return;
7531 7452 }
7532 7453
7533 7454 cmd->Status = EXT_STATUS_OK;
7534 7455 cmd->DetailStatus = EXT_STATUS_OK;
7535 7456
7536 7457 switch (port_param.Mode) {
7537 7458 case EXT_IIDMA_MODE_GET:
7538 7459 /*
7539 7460 * Report the firmware's port rate for the wwpn
7540 7461 */
7541 7462 rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7542 7463 port_param.Mode);
7543 7464
7544 7465 if (rval != QL_SUCCESS) {
7545 7466 EL(ha, "iidma get failed: %xh\n", rval);
7546 7467 cmd->Status = EXT_STATUS_MAILBOX;
7547 7468 cmd->DetailStatus = rval;
7548 7469 cmd->ResponseLen = 0;
7549 7470 } else {
7550 7471 switch (idma_rate) {
7551 7472 case IIDMA_RATE_1GB:
7552 7473 port_param.Speed =
7553 7474 EXT_DEF_PORTSPEED_1GBIT;
7554 7475 break;
7555 7476 case IIDMA_RATE_2GB:
7556 7477 port_param.Speed =
7557 7478 EXT_DEF_PORTSPEED_2GBIT;
7558 7479 break;
7559 7480 case IIDMA_RATE_4GB:
7560 7481 port_param.Speed =
7561 7482 EXT_DEF_PORTSPEED_4GBIT;
7562 7483 break;
7563 7484 case IIDMA_RATE_8GB:
7564 7485 port_param.Speed =
7565 7486 EXT_DEF_PORTSPEED_8GBIT;
7566 7487 break;
7567 7488 case IIDMA_RATE_10GB:
7568 7489 port_param.Speed =
7569 7490 EXT_DEF_PORTSPEED_10GBIT;
7570 7491 break;
7571 7492 default:
7572 7493 port_param.Speed =
7573 7494 EXT_DEF_PORTSPEED_UNKNOWN;
7574 7495 EL(ha, "failed, Port speed rate=%xh\n",
7575 7496 idma_rate);
7576 7497 break;
7577 7498 }
7578 7499
7579 7500 /* Copy back the data */
7580 7501 rval = ddi_copyout((void *)&port_param,
7581 7502 (void *)(uintptr_t)cmd->ResponseAdr,
7582 7503 sizeof (EXT_PORT_PARAM), mode);
7583 7504
7584 7505 if (rval != 0) {
7585 7506 cmd->Status = EXT_STATUS_COPY_ERR;
7586 7507 cmd->ResponseLen = 0;
7587 7508 EL(ha, "failed, ddi_copyout\n");
7588 7509 } else {
7589 7510 cmd->ResponseLen = (uint32_t)
7590 7511 sizeof (EXT_PORT_PARAM);
7591 7512 }
7592 7513 }
7593 7514 break;
7594 7515
7595 7516 case EXT_IIDMA_MODE_SET:
7596 7517 /*
7597 7518 * Set the firmware's port rate for the wwpn
7598 7519 */
7599 7520 switch (port_param.Speed) {
7600 7521 case EXT_DEF_PORTSPEED_1GBIT:
7601 7522 idma_rate = IIDMA_RATE_1GB;
7602 7523 break;
7603 7524 case EXT_DEF_PORTSPEED_2GBIT:
7604 7525 idma_rate = IIDMA_RATE_2GB;
7605 7526 break;
7606 7527 case EXT_DEF_PORTSPEED_4GBIT:
7607 7528 idma_rate = IIDMA_RATE_4GB;
7608 7529 break;
7609 7530 case EXT_DEF_PORTSPEED_8GBIT:
7610 7531 idma_rate = IIDMA_RATE_8GB;
7611 7532 break;
7612 7533 case EXT_DEF_PORTSPEED_10GBIT:
7613 7534 port_param.Speed = IIDMA_RATE_10GB;
7614 7535 break;
7615 7536 default:
7616 7537 EL(ha, "invalid set iidma rate: %x\n",
7617 7538 port_param.Speed);
7618 7539 cmd->Status = EXT_STATUS_INVALID_PARAM;
7619 7540 cmd->ResponseLen = 0;
7620 7541 rval = QL_PARAMETER_ERROR;
7621 7542 break;
7622 7543 }
7623 7544
7624 7545 if (rval == QL_SUCCESS) {
7625 7546 rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7626 7547 port_param.Mode);
7627 7548 if (rval != QL_SUCCESS) {
7628 7549 EL(ha, "iidma set failed: %xh\n", rval);
7629 7550 cmd->Status = EXT_STATUS_MAILBOX;
7630 7551 cmd->DetailStatus = rval;
7631 7552 cmd->ResponseLen = 0;
7632 7553 }
7633 7554 }
7634 7555 break;
7635 7556 default:
7636 7557 EL(ha, "invalid mode specified: %x\n", port_param.Mode);
7637 7558 cmd->Status = EXT_STATUS_INVALID_PARAM;
7638 7559 cmd->ResponseLen = 0;
7639 7560 cmd->DetailStatus = 0;
7640 7561 break;
7641 7562 }
7642 7563
7643 7564 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7644 7565 }
7645 7566
7646 7567 /*
7647 7568 * ql_get_fwexttrace
7648 7569 * Dumps f/w extended trace buffer
7649 7570 *
7650 7571 * Input:
7651 7572 * ha: adapter state pointer.
7652 7573 * bp: buffer address.
7653 7574 * mode: flags
7654 7575 *
7655 7576 * Returns:
7656 7577 *
7657 7578 * Context:
7658 7579 * Kernel context.
7659 7580 */
7660 7581 /* ARGSUSED */
7661 7582 static void
7662 7583 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7663 7584 {
7664 7585 int rval;
7665 7586 caddr_t payload;
7666 7587
7667 7588 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7668 7589
7669 7590 if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7670 7591 EL(ha, "invalid request for this HBA\n");
7671 7592 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7672 7593 cmd->ResponseLen = 0;
7673 7594 return;
7674 7595 }
7675 7596
7676 7597 if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7677 7598 (ha->fwexttracebuf.bp == NULL)) {
7678 7599 EL(ha, "f/w extended trace is not enabled\n");
7679 7600 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7680 7601 cmd->ResponseLen = 0;
7681 7602 return;
7682 7603 }
7683 7604
7684 7605 if (cmd->ResponseLen < FWEXTSIZE) {
7685 7606 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7686 7607 cmd->DetailStatus = FWEXTSIZE;
7687 7608 EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
7688 7609 cmd->ResponseLen, FWEXTSIZE);
7689 7610 cmd->ResponseLen = 0;
7690 7611 return;
7691 7612 }
7692 7613
7693 7614 /* Time Stamp */
7694 7615 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP);
7695 7616 if (rval != QL_SUCCESS) {
7696 7617 EL(ha, "f/w extended trace insert"
7697 7618 "time stamp failed: %xh\n", rval);
7698 7619 cmd->Status = EXT_STATUS_ERR;
7699 7620 cmd->ResponseLen = 0;
7700 7621 return;
7701 7622 }
7702 7623
7703 7624 /* Disable Tracing */
↓ open down ↓ |
655 lines elided |
↑ open up ↑ |
7704 7625 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE);
7705 7626 if (rval != QL_SUCCESS) {
7706 7627 EL(ha, "f/w extended trace disable failed: %xh\n", rval);
7707 7628 cmd->Status = EXT_STATUS_ERR;
7708 7629 cmd->ResponseLen = 0;
7709 7630 return;
7710 7631 }
7711 7632
7712 7633 /* Allocate payload buffer */
7713 7634 payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7714 - if (payload == NULL) {
7715 - EL(ha, "failed, kmem_zalloc\n");
7716 - cmd->Status = EXT_STATUS_NO_MEMORY;
7717 - cmd->ResponseLen = 0;
7718 - return;
7719 - }
7720 7635
7721 7636 /* Sync DMA buffer. */
7722 7637 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
7723 7638 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
7724 7639
7725 7640 /* Copy trace buffer data. */
7726 7641 ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
7727 7642 (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
7728 7643 DDI_DEV_AUTOINCR);
7729 7644
7730 7645 /* Send payload to application. */
7731 7646 if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7732 7647 cmd->ResponseLen, mode) != cmd->ResponseLen) {
7733 7648 EL(ha, "failed, send_buffer_data\n");
7734 7649 cmd->Status = EXT_STATUS_COPY_ERR;
7735 7650 cmd->ResponseLen = 0;
7736 7651 } else {
7737 7652 cmd->Status = EXT_STATUS_OK;
7738 7653 }
7739 7654
7740 7655 kmem_free(payload, FWEXTSIZE);
7741 7656
7742 7657 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7743 7658 }
7744 7659
7745 7660 /*
7746 7661 * ql_get_fwfcetrace
7747 7662 * Dumps f/w fibre channel event trace buffer
7748 7663 *
7749 7664 * Input:
7750 7665 * ha: adapter state pointer.
7751 7666 * bp: buffer address.
7752 7667 * mode: flags
7753 7668 *
7754 7669 * Returns:
7755 7670 *
7756 7671 * Context:
7757 7672 * Kernel context.
7758 7673 */
7759 7674 /* ARGSUSED */
7760 7675 static void
7761 7676 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7762 7677 {
7763 7678 int rval;
7764 7679 caddr_t payload;
7765 7680
7766 7681 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7767 7682
7768 7683 if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7769 7684 EL(ha, "invalid request for this HBA\n");
7770 7685 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7771 7686 cmd->ResponseLen = 0;
7772 7687 return;
7773 7688 }
7774 7689
7775 7690 if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7776 7691 (ha->fwfcetracebuf.bp == NULL)) {
7777 7692 EL(ha, "f/w FCE trace is not enabled\n");
7778 7693 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7779 7694 cmd->ResponseLen = 0;
7780 7695 return;
7781 7696 }
7782 7697
7783 7698 if (cmd->ResponseLen < FWFCESIZE) {
7784 7699 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7785 7700 cmd->DetailStatus = FWFCESIZE;
7786 7701 EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
7787 7702 cmd->ResponseLen, FWFCESIZE);
7788 7703 cmd->ResponseLen = 0;
7789 7704 return;
7790 7705 }
7791 7706
7792 7707 /* Disable Tracing */
↓ open down ↓ |
63 lines elided |
↑ open up ↑ |
7793 7708 rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE);
7794 7709 if (rval != QL_SUCCESS) {
7795 7710 EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
7796 7711 cmd->Status = EXT_STATUS_ERR;
7797 7712 cmd->ResponseLen = 0;
7798 7713 return;
7799 7714 }
7800 7715
7801 7716 /* Allocate payload buffer */
7802 7717 payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7803 - if (payload == NULL) {
7804 - EL(ha, "failed, kmem_zalloc\n");
7805 - cmd->Status = EXT_STATUS_NO_MEMORY;
7806 - cmd->ResponseLen = 0;
7807 - return;
7808 - }
7809 7718
7810 7719 /* Sync DMA buffer. */
7811 7720 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
7812 7721 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
7813 7722
7814 7723 /* Copy trace buffer data. */
7815 7724 ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
7816 7725 (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
7817 7726 DDI_DEV_AUTOINCR);
7818 7727
7819 7728 /* Send payload to application. */
7820 7729 if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7821 7730 cmd->ResponseLen, mode) != cmd->ResponseLen) {
7822 7731 EL(ha, "failed, send_buffer_data\n");
7823 7732 cmd->Status = EXT_STATUS_COPY_ERR;
7824 7733 cmd->ResponseLen = 0;
7825 7734 } else {
7826 7735 cmd->Status = EXT_STATUS_OK;
7827 7736 }
7828 7737
7829 7738 kmem_free(payload, FWFCESIZE);
7830 7739
7831 7740 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7832 7741 }
7833 7742
7834 7743 /*
7835 7744 * ql_get_pci_data
7836 7745 * Retrieves pci config space data
7837 7746 *
7838 7747 * Input:
7839 7748 * ha: adapter state pointer.
7840 7749 * cmd: Local EXT_IOCTL cmd struct pointer.
7841 7750 * mode: flags.
7842 7751 *
7843 7752 * Returns:
7844 7753 * None, request status indicated in cmd->Status.
7845 7754 *
7846 7755 * Context:
7847 7756 * Kernel context.
7848 7757 *
7849 7758 */
7850 7759 static void
7851 7760 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7852 7761 {
7853 7762 uint8_t cap_ptr;
7854 7763 uint8_t cap_id;
7855 7764 uint32_t buf_size = 256;
7856 7765
7857 7766 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7858 7767
7859 7768 /*
7860 7769 * First check the "Capabilities List" bit of the status register.
7861 7770 */
7862 7771 if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
7863 7772 /*
7864 7773 * Now get the capability pointer
7865 7774 */
7866 7775 cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
7867 7776 while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
7868 7777 /*
7869 7778 * Check for the pcie capability.
7870 7779 */
7871 7780 cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
7872 7781 if (cap_id == PCI_CAP_ID_PCI_E) {
7873 7782 buf_size = 4096;
7874 7783 break;
7875 7784 }
7876 7785 cap_ptr = (uint8_t)ql_pci_config_get8(ha,
7877 7786 (cap_ptr + PCI_CAP_NEXT_PTR));
7878 7787 }
7879 7788 }
7880 7789
7881 7790 if (cmd->ResponseLen < buf_size) {
7882 7791 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7883 7792 cmd->DetailStatus = buf_size;
7884 7793 EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
7885 7794 cmd->ResponseLen);
7886 7795 return;
7887 7796 }
7888 7797
7889 7798 /* Dump PCI config data. */
7890 7799 if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7891 7800 buf_size, mode)) != 0) {
7892 7801 cmd->Status = EXT_STATUS_COPY_ERR;
7893 7802 cmd->DetailStatus = 0;
7894 7803 EL(ha, "failed, copy err pci_dump\n");
7895 7804 } else {
7896 7805 cmd->Status = EXT_STATUS_OK;
7897 7806 cmd->DetailStatus = buf_size;
7898 7807 }
7899 7808
7900 7809 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7901 7810 }
7902 7811
7903 7812 /*
7904 7813 * ql_pci_dump
7905 7814 * Dumps PCI config data to application buffer.
7906 7815 *
7907 7816 * Input:
7908 7817 * ha = adapter state pointer.
7909 7818 * bp = user buffer address.
7910 7819 *
7911 7820 * Returns:
7912 7821 *
7913 7822 * Context:
7914 7823 * Kernel context.
↓ open down ↓ |
96 lines elided |
↑ open up ↑ |
7915 7824 */
7916 7825 int
7917 7826 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7918 7827 {
7919 7828 uint32_t pci_os;
7920 7829 uint32_t *ptr32, *org_ptr32;
7921 7830
7922 7831 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7923 7832
7924 7833 ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7925 - if (ptr32 == NULL) {
7926 - EL(ha, "failed kmem_zalloc\n");
7927 - return (ENOMEM);
7928 - }
7929 7834
7930 7835 /* store the initial value of ptr32 */
7931 7836 org_ptr32 = ptr32;
7932 7837 for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7933 7838 *ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7934 7839 LITTLE_ENDIAN_32(ptr32);
7935 7840 ptr32++;
7936 7841 }
7937 7842
7938 7843 if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7939 7844 0) {
7940 7845 EL(ha, "failed ddi_copyout\n");
7941 7846 kmem_free(org_ptr32, pci_size);
7942 7847 return (EFAULT);
7943 7848 }
7944 7849
7945 7850 QL_DUMP_9(org_ptr32, 8, pci_size);
7946 7851
7947 7852 kmem_free(org_ptr32, pci_size);
7948 7853
7949 7854 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7950 7855
7951 7856 return (0);
7952 7857 }
7953 7858
7954 7859 /*
7955 7860 * ql_menlo_reset
7956 7861 * Reset Menlo
7957 7862 *
7958 7863 * Input:
7959 7864 * ha: adapter state pointer.
7960 7865 * bp: buffer address.
7961 7866 * mode: flags
7962 7867 *
7963 7868 * Returns:
7964 7869 *
7965 7870 * Context:
7966 7871 * Kernel context.
7967 7872 */
7968 7873 static void
7969 7874 ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7970 7875 {
7971 7876 EXT_MENLO_RESET rst;
7972 7877 ql_mbx_data_t mr;
7973 7878 int rval;
7974 7879
7975 7880 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7976 7881
7977 7882 if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7978 7883 EL(ha, "failed, invalid request for HBA\n");
7979 7884 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7980 7885 cmd->ResponseLen = 0;
7981 7886 return;
7982 7887 }
7983 7888
7984 7889 /*
7985 7890 * TODO: only vp_index 0 can do this (?)
7986 7891 */
7987 7892
7988 7893 /* Verify the size of request structure. */
7989 7894 if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
7990 7895 /* Return error */
7991 7896 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7992 7897 sizeof (EXT_MENLO_RESET));
7993 7898 cmd->Status = EXT_STATUS_INVALID_PARAM;
7994 7899 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7995 7900 cmd->ResponseLen = 0;
7996 7901 return;
7997 7902 }
7998 7903
7999 7904 /* Get reset request. */
8000 7905 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
8001 7906 (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) {
8002 7907 EL(ha, "failed, ddi_copyin\n");
8003 7908 cmd->Status = EXT_STATUS_COPY_ERR;
8004 7909 cmd->ResponseLen = 0;
8005 7910 return;
8006 7911 }
8007 7912
8008 7913 /* Wait for I/O to stop and daemon to stall. */
8009 7914 if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8010 7915 EL(ha, "ql_stall_driver failed\n");
8011 7916 ql_restart_hba(ha);
8012 7917 cmd->Status = EXT_STATUS_BUSY;
8013 7918 cmd->ResponseLen = 0;
8014 7919 return;
8015 7920 }
8016 7921
8017 7922 rval = ql_reset_menlo(ha, &mr, rst.Flags);
8018 7923 if (rval != QL_SUCCESS) {
8019 7924 EL(ha, "failed, status=%xh\n", rval);
8020 7925 cmd->Status = EXT_STATUS_MAILBOX;
8021 7926 cmd->DetailStatus = rval;
8022 7927 cmd->ResponseLen = 0;
8023 7928 } else if (mr.mb[1] != 0) {
8024 7929 EL(ha, "failed, substatus=%d\n", mr.mb[1]);
8025 7930 cmd->Status = EXT_STATUS_ERR;
8026 7931 cmd->DetailStatus = mr.mb[1];
8027 7932 cmd->ResponseLen = 0;
8028 7933 }
8029 7934
8030 7935 ql_restart_hba(ha);
8031 7936
8032 7937 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8033 7938 }
8034 7939
8035 7940 /*
8036 7941 * ql_menlo_get_fw_version
8037 7942 * Get Menlo firmware version.
8038 7943 *
8039 7944 * Input:
8040 7945 * ha: adapter state pointer.
8041 7946 * bp: buffer address.
8042 7947 * mode: flags
8043 7948 *
8044 7949 * Returns:
8045 7950 *
8046 7951 * Context:
8047 7952 * Kernel context.
8048 7953 */
8049 7954 static void
8050 7955 ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8051 7956 {
8052 7957 int rval;
8053 7958 ql_mbx_iocb_t *pkt;
8054 7959 EXT_MENLO_GET_FW_VERSION ver = {0};
8055 7960
8056 7961 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8057 7962
8058 7963 if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
8059 7964 EL(ha, "failed, invalid request for HBA\n");
8060 7965 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8061 7966 cmd->ResponseLen = 0;
8062 7967 return;
8063 7968 }
8064 7969
8065 7970 if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
↓ open down ↓ |
127 lines elided |
↑ open up ↑ |
8066 7971 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8067 7972 cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
8068 7973 EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
8069 7974 sizeof (EXT_MENLO_GET_FW_VERSION));
8070 7975 cmd->ResponseLen = 0;
8071 7976 return;
8072 7977 }
8073 7978
8074 7979 /* Allocate packet. */
8075 7980 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8076 - if (pkt == NULL) {
8077 - EL(ha, "failed, kmem_zalloc\n");
8078 - cmd->Status = EXT_STATUS_NO_MEMORY;
8079 - cmd->ResponseLen = 0;
8080 - return;
8081 - }
8082 7981
8083 7982 pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
8084 7983 pkt->mvfy.entry_count = 1;
8085 7984 pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);
8086 7985
8087 7986 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8088 7987 LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
8089 7988 LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
8090 7989 ver.FwVersion = LE_32(pkt->mvfy.fw_version);
8091 7990
8092 7991 if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
8093 7992 pkt->mvfy.options_status != CS_COMPLETE) {
8094 7993 /* Command error */
8095 7994 EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8096 7995 pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
8097 7996 pkt->mvfy.failure_code);
8098 7997 cmd->Status = EXT_STATUS_ERR;
8099 7998 cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8100 7999 QL_FUNCTION_FAILED;
8101 8000 cmd->ResponseLen = 0;
8102 8001 } else if (ddi_copyout((void *)&ver,
8103 8002 (void *)(uintptr_t)cmd->ResponseAdr,
8104 8003 sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
8105 8004 EL(ha, "failed, ddi_copyout\n");
8106 8005 cmd->Status = EXT_STATUS_COPY_ERR;
8107 8006 cmd->ResponseLen = 0;
8108 8007 } else {
8109 8008 cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
8110 8009 }
8111 8010
8112 8011 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8113 8012
8114 8013 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8115 8014 }
8116 8015
8117 8016 /*
8118 8017 * ql_menlo_update_fw
8119 8018 * Get Menlo update firmware.
8120 8019 *
8121 8020 * Input:
8122 8021 * ha: adapter state pointer.
8123 8022 * bp: buffer address.
8124 8023 * mode: flags
8125 8024 *
8126 8025 * Returns:
8127 8026 *
8128 8027 * Context:
8129 8028 * Kernel context.
8130 8029 */
8131 8030 static void
8132 8031 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8133 8032 {
8134 8033 ql_mbx_iocb_t *pkt;
8135 8034 dma_mem_t *dma_mem;
8136 8035 EXT_MENLO_UPDATE_FW fw;
8137 8036 uint32_t *ptr32;
8138 8037 int rval;
8139 8038
8140 8039 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8141 8040
8142 8041 if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
8143 8042 EL(ha, "failed, invalid request for HBA\n");
8144 8043 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8145 8044 cmd->ResponseLen = 0;
8146 8045 return;
8147 8046 }
8148 8047
8149 8048 /*
8150 8049 * TODO: only vp_index 0 can do this (?)
8151 8050 */
8152 8051
8153 8052 /* Verify the size of request structure. */
8154 8053 if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
8155 8054 /* Return error */
8156 8055 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8157 8056 sizeof (EXT_MENLO_UPDATE_FW));
8158 8057 cmd->Status = EXT_STATUS_INVALID_PARAM;
8159 8058 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8160 8059 cmd->ResponseLen = 0;
8161 8060 return;
8162 8061 }
8163 8062
8164 8063 /* Get update fw request. */
8165 8064 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
8166 8065 sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) {
8167 8066 EL(ha, "failed, ddi_copyin\n");
8168 8067 cmd->Status = EXT_STATUS_COPY_ERR;
8169 8068 cmd->ResponseLen = 0;
8170 8069 return;
8171 8070 }
8172 8071
8173 8072 /* Wait for I/O to stop and daemon to stall. */
8174 8073 if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8175 8074 EL(ha, "ql_stall_driver failed\n");
8176 8075 ql_restart_hba(ha);
8177 8076 cmd->Status = EXT_STATUS_BUSY;
8178 8077 cmd->ResponseLen = 0;
8179 8078 return;
8180 8079 }
↓ open down ↓ |
89 lines elided |
↑ open up ↑ |
8181 8080
8182 8081 /* Allocate packet. */
8183 8082 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
8184 8083 if (dma_mem == NULL) {
8185 8084 EL(ha, "failed, kmem_zalloc\n");
8186 8085 cmd->Status = EXT_STATUS_NO_MEMORY;
8187 8086 cmd->ResponseLen = 0;
8188 8087 return;
8189 8088 }
8190 8089 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8191 - if (pkt == NULL) {
8192 - EL(ha, "failed, kmem_zalloc\n");
8193 - kmem_free(dma_mem, sizeof (dma_mem_t));
8194 - ql_restart_hba(ha);
8195 - cmd->Status = EXT_STATUS_NO_MEMORY;
8196 - cmd->ResponseLen = 0;
8197 - return;
8198 - }
8199 8090
8200 8091 /* Get DMA memory for the IOCB */
8201 8092 if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
8202 8093 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8203 8094 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8204 8095 "alloc failed", QL_NAME, ha->instance);
8205 8096 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8206 8097 kmem_free(dma_mem, sizeof (dma_mem_t));
8207 8098 ql_restart_hba(ha);
8208 8099 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8209 8100 cmd->ResponseLen = 0;
8210 8101 return;
8211 8102 }
8212 8103
8213 8104 /* Get firmware data. */
8214 8105 if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
8215 8106 fw.TotalByteCount, mode) != fw.TotalByteCount) {
8216 8107 EL(ha, "failed, get_buffer_data\n");
8217 8108 ql_free_dma_resource(ha, dma_mem);
8218 8109 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8219 8110 kmem_free(dma_mem, sizeof (dma_mem_t));
8220 8111 ql_restart_hba(ha);
8221 8112 cmd->Status = EXT_STATUS_COPY_ERR;
8222 8113 cmd->ResponseLen = 0;
8223 8114 return;
8224 8115 }
8225 8116
8226 8117 /* Sync DMA buffer. */
8227 8118 (void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8228 8119 DDI_DMA_SYNC_FORDEV);
8229 8120
8230 8121 pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
8231 8122 pkt->mvfy.entry_count = 1;
8232 8123 pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
8233 8124 ptr32 = dma_mem->bp;
8234 8125 pkt->mvfy.fw_version = LE_32(ptr32[2]);
8235 8126 pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
8236 8127 pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
8237 8128 pkt->mvfy.dseg_count = LE_16(1);
8238 8129 pkt->mvfy.dseg_0_address[0] = (uint32_t)
8239 8130 LE_32(LSD(dma_mem->cookie.dmac_laddress));
8240 8131 pkt->mvfy.dseg_0_address[1] = (uint32_t)
8241 8132 LE_32(MSD(dma_mem->cookie.dmac_laddress));
8242 8133 pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);
8243 8134
8244 8135 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8245 8136 LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
8246 8137 LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
8247 8138
8248 8139 if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
8249 8140 pkt->mvfy.options_status != CS_COMPLETE) {
8250 8141 /* Command error */
8251 8142 EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8252 8143 pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
8253 8144 pkt->mvfy.failure_code);
8254 8145 cmd->Status = EXT_STATUS_ERR;
8255 8146 cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8256 8147 QL_FUNCTION_FAILED;
8257 8148 cmd->ResponseLen = 0;
8258 8149 }
8259 8150
8260 8151 ql_free_dma_resource(ha, dma_mem);
8261 8152 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8262 8153 kmem_free(dma_mem, sizeof (dma_mem_t));
8263 8154 ql_restart_hba(ha);
8264 8155
8265 8156 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8266 8157 }
8267 8158
8268 8159 /*
8269 8160 * ql_menlo_manage_info
8270 8161 * Get Menlo manage info.
8271 8162 *
8272 8163 * Input:
8273 8164 * ha: adapter state pointer.
8274 8165 * bp: buffer address.
8275 8166 * mode: flags
8276 8167 *
8277 8168 * Returns:
8278 8169 *
8279 8170 * Context:
8280 8171 * Kernel context.
8281 8172 */
8282 8173 static void
8283 8174 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8284 8175 {
8285 8176 ql_mbx_iocb_t *pkt;
8286 8177 dma_mem_t *dma_mem = NULL;
8287 8178 EXT_MENLO_MANAGE_INFO info;
8288 8179 int rval;
8289 8180
8290 8181 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8291 8182
8292 8183
8293 8184 /* The call is only supported for Schultz right now */
8294 8185 if (CFG_IST(ha, CFG_CTRL_8081)) {
8295 8186 ql_get_xgmac_statistics(ha, cmd, mode);
8296 8187 QL_PRINT_9(CE_CONT, "(%d): CFG_CTRL_81XX done\n",
8297 8188 ha->instance);
8298 8189 return;
8299 8190 }
8300 8191
8301 8192 if (!CFG_IST(ha, CFG_CTRL_8081) || !CFG_IST(ha, CFG_CTRL_MENLO)) {
8302 8193 EL(ha, "failed, invalid request for HBA\n");
8303 8194 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8304 8195 cmd->ResponseLen = 0;
8305 8196 return;
8306 8197 }
8307 8198
8308 8199 /* Verify the size of request structure. */
8309 8200 if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
8310 8201 /* Return error */
8311 8202 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8312 8203 sizeof (EXT_MENLO_MANAGE_INFO));
8313 8204 cmd->Status = EXT_STATUS_INVALID_PARAM;
8314 8205 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8315 8206 cmd->ResponseLen = 0;
8316 8207 return;
8317 8208 }
8318 8209
8319 8210 /* Get manage info request. */
↓ open down ↓ |
111 lines elided |
↑ open up ↑ |
8320 8211 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
8321 8212 (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
8322 8213 EL(ha, "failed, ddi_copyin\n");
8323 8214 cmd->Status = EXT_STATUS_COPY_ERR;
8324 8215 cmd->ResponseLen = 0;
8325 8216 return;
8326 8217 }
8327 8218
8328 8219 /* Allocate packet. */
8329 8220 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8330 - if (pkt == NULL) {
8331 - EL(ha, "failed, kmem_zalloc\n");
8332 - ql_restart_driver(ha);
8333 - cmd->Status = EXT_STATUS_NO_MEMORY;
8334 - cmd->ResponseLen = 0;
8335 - return;
8336 - }
8337 8221
8338 8222 pkt->mdata.entry_type = MENLO_DATA_TYPE;
8339 8223 pkt->mdata.entry_count = 1;
8340 8224 pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
8341 8225
8342 8226 /* Get DMA memory for the IOCB */
8343 8227 if (info.Operation == MENLO_OP_READ_MEM ||
8344 8228 info.Operation == MENLO_OP_WRITE_MEM) {
8345 8229 pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
8346 8230 pkt->mdata.parameter_1 =
8347 8231 LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
8348 8232 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
8349 8233 KM_SLEEP);
8350 8234 if (dma_mem == NULL) {
8351 8235 EL(ha, "failed, kmem_zalloc\n");
8352 8236 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8353 8237 cmd->Status = EXT_STATUS_NO_MEMORY;
8354 8238 cmd->ResponseLen = 0;
8355 8239 return;
8356 8240 }
8357 8241 if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
8358 8242 LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8359 8243 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8360 8244 "alloc failed", QL_NAME, ha->instance);
8361 8245 kmem_free(dma_mem, sizeof (dma_mem_t));
8362 8246 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8363 8247 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8364 8248 cmd->ResponseLen = 0;
8365 8249 return;
8366 8250 }
8367 8251 if (info.Operation == MENLO_OP_WRITE_MEM) {
8368 8252 /* Get data. */
8369 8253 if (ql_get_buffer_data(
8370 8254 (caddr_t)(uintptr_t)info.pDataBytes,
8371 8255 dma_mem->bp, info.TotalByteCount, mode) !=
8372 8256 info.TotalByteCount) {
8373 8257 EL(ha, "failed, get_buffer_data\n");
8374 8258 ql_free_dma_resource(ha, dma_mem);
8375 8259 kmem_free(dma_mem, sizeof (dma_mem_t));
8376 8260 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8377 8261 cmd->Status = EXT_STATUS_COPY_ERR;
8378 8262 cmd->ResponseLen = 0;
8379 8263 return;
8380 8264 }
8381 8265 (void) ddi_dma_sync(dma_mem->dma_handle, 0,
8382 8266 dma_mem->size, DDI_DMA_SYNC_FORDEV);
8383 8267 }
8384 8268 pkt->mdata.dseg_count = LE_16(1);
8385 8269 pkt->mdata.dseg_0_address[0] = (uint32_t)
8386 8270 LE_32(LSD(dma_mem->cookie.dmac_laddress));
8387 8271 pkt->mdata.dseg_0_address[1] = (uint32_t)
8388 8272 LE_32(MSD(dma_mem->cookie.dmac_laddress));
8389 8273 pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
8390 8274 } else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
8391 8275 pkt->mdata.parameter_1 =
8392 8276 LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
8393 8277 pkt->mdata.parameter_2 =
8394 8278 LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
8395 8279 pkt->mdata.parameter_3 =
8396 8280 LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
8397 8281 } else if (info.Operation & MENLO_OP_GET_INFO) {
8398 8282 pkt->mdata.parameter_1 =
8399 8283 LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
8400 8284 pkt->mdata.parameter_2 =
8401 8285 LE_32(info.Parameters.ap.MenloInfo.InfoContext);
8402 8286 }
8403 8287
8404 8288 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8405 8289 LITTLE_ENDIAN_16(&pkt->mdata.options_status);
8406 8290 LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
8407 8291
8408 8292 if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
8409 8293 pkt->mdata.options_status != CS_COMPLETE) {
8410 8294 /* Command error */
8411 8295 EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8412 8296 pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
8413 8297 pkt->mdata.failure_code);
8414 8298 cmd->Status = EXT_STATUS_ERR;
8415 8299 cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8416 8300 QL_FUNCTION_FAILED;
8417 8301 cmd->ResponseLen = 0;
8418 8302 } else if (info.Operation == MENLO_OP_READ_MEM) {
8419 8303 (void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8420 8304 DDI_DMA_SYNC_FORKERNEL);
8421 8305 if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
8422 8306 dma_mem->bp, info.TotalByteCount, mode) !=
8423 8307 info.TotalByteCount) {
8424 8308 cmd->Status = EXT_STATUS_COPY_ERR;
8425 8309 cmd->ResponseLen = 0;
8426 8310 }
8427 8311 }
8428 8312
8429 8313 ql_free_dma_resource(ha, dma_mem);
8430 8314 kmem_free(dma_mem, sizeof (dma_mem_t));
8431 8315 kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8432 8316
8433 8317 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8434 8318 }
8435 8319
8436 8320 /*
8437 8321 * ql_suspend_hba
8438 8322 * Suspends all adapter ports.
8439 8323 *
8440 8324 * Input:
8441 8325 * ha: adapter state pointer.
8442 8326 * options: BIT_0 --> leave driver stalled on exit if
8443 8327 * failed.
8444 8328 *
8445 8329 * Returns:
8446 8330 * ql local function return status code.
8447 8331 *
8448 8332 * Context:
8449 8333 * Kernel context.
8450 8334 */
8451 8335 static int
8452 8336 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8453 8337 {
8454 8338 ql_adapter_state_t *ha2;
8455 8339 ql_link_t *link;
8456 8340 int rval = QL_SUCCESS;
8457 8341
8458 8342 /* Quiesce I/O on all adapter ports */
8459 8343 for (link = ql_hba.first; link != NULL; link = link->next) {
8460 8344 ha2 = link->base_address;
8461 8345
8462 8346 if (ha2->fru_hba_index != ha->fru_hba_index) {
8463 8347 continue;
8464 8348 }
8465 8349
8466 8350 if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
8467 8351 EL(ha, "ql_stall_driver status=%xh\n", rval);
8468 8352 break;
8469 8353 }
8470 8354 }
8471 8355
8472 8356 return (rval);
8473 8357 }
8474 8358
8475 8359 /*
8476 8360 * ql_restart_hba
8477 8361 * Restarts adapter.
8478 8362 *
8479 8363 * Input:
8480 8364 * ha: adapter state pointer.
8481 8365 *
8482 8366 * Context:
8483 8367 * Kernel context.
8484 8368 */
8485 8369 static void
8486 8370 ql_restart_hba(ql_adapter_state_t *ha)
8487 8371 {
8488 8372 ql_adapter_state_t *ha2;
8489 8373 ql_link_t *link;
8490 8374
8491 8375 /* Resume I/O on all adapter ports */
8492 8376 for (link = ql_hba.first; link != NULL; link = link->next) {
8493 8377 ha2 = link->base_address;
8494 8378
8495 8379 if (ha2->fru_hba_index != ha->fru_hba_index) {
8496 8380 continue;
8497 8381 }
8498 8382
8499 8383 ql_restart_driver(ha2);
8500 8384 }
8501 8385 }
8502 8386
8503 8387 /*
8504 8388 * ql_get_vp_cnt_id
8505 8389 * Retrieves pci config space data
8506 8390 *
8507 8391 * Input:
8508 8392 * ha: adapter state pointer.
8509 8393 * cmd: Local EXT_IOCTL cmd struct pointer.
8510 8394 * mode: flags.
8511 8395 *
8512 8396 * Returns:
8513 8397 * None, request status indicated in cmd->Status.
8514 8398 *
8515 8399 * Context:
8516 8400 * Kernel context.
8517 8401 *
8518 8402 */
8519 8403 static void
8520 8404 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8521 8405 {
8522 8406 ql_adapter_state_t *vha;
8523 8407 PEXT_VPORT_ID_CNT ptmp_vp;
8524 8408 int id = 0;
8525 8409 int rval;
8526 8410 char name[MAXPATHLEN];
8527 8411
8528 8412 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8529 8413
8530 8414 /*
8531 8415 * To be backward compatible with older API
8532 8416 * check for the size of old EXT_VPORT_ID_CNT
8533 8417 */
8534 8418 if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
8535 8419 (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
8536 8420 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8537 8421 cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
8538 8422 EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
8539 8423 cmd->ResponseLen);
8540 8424 cmd->ResponseLen = 0;
8541 8425 return;
8542 8426 }
8543 8427
8544 8428 ptmp_vp = (EXT_VPORT_ID_CNT *)
8545 8429 kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
8546 8430 if (ptmp_vp == NULL) {
8547 8431 EL(ha, "failed, kmem_zalloc\n");
8548 8432 cmd->ResponseLen = 0;
8549 8433 return;
8550 8434 }
8551 8435 vha = ha->vp_next;
8552 8436 while (vha != NULL) {
8553 8437 ptmp_vp->VpCnt++;
8554 8438 ptmp_vp->VpId[id] = vha->vp_index;
8555 8439 (void) ddi_pathname(vha->dip, name);
8556 8440 (void) strcpy((char *)ptmp_vp->vp_path[id], name);
8557 8441 ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
8558 8442 id++;
8559 8443 vha = vha->vp_next;
8560 8444 }
8561 8445 rval = ddi_copyout((void *)ptmp_vp,
8562 8446 (void *)(uintptr_t)(cmd->ResponseAdr),
8563 8447 cmd->ResponseLen, mode);
8564 8448 if (rval != 0) {
8565 8449 cmd->Status = EXT_STATUS_COPY_ERR;
8566 8450 cmd->ResponseLen = 0;
8567 8451 EL(ha, "failed, ddi_copyout\n");
8568 8452 } else {
8569 8453 cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
8570 8454 QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n",
8571 8455 ha->instance, ptmp_vp->VpCnt);
8572 8456 }
8573 8457
8574 8458 }
8575 8459
8576 8460 /*
8577 8461 * ql_vp_ioctl
8578 8462 * Performs all EXT_CC_VPORT_CMD functions.
8579 8463 *
8580 8464 * Input:
8581 8465 * ha: adapter state pointer.
8582 8466 * cmd: Local EXT_IOCTL cmd struct pointer.
8583 8467 * mode: flags.
8584 8468 *
8585 8469 * Returns:
8586 8470 * None, request status indicated in cmd->Status.
8587 8471 *
8588 8472 * Context:
8589 8473 * Kernel context.
8590 8474 */
8591 8475 static void
8592 8476 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8593 8477 {
8594 8478 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
8595 8479 cmd->SubCode);
8596 8480
8597 8481 /* case off on command subcode */
8598 8482 switch (cmd->SubCode) {
8599 8483 case EXT_VF_SC_VPORT_GETINFO:
8600 8484 ql_qry_vport(ha, cmd, mode);
8601 8485 break;
8602 8486 default:
8603 8487 /* function not supported. */
8604 8488 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
8605 8489 EL(ha, "failed, Unsupported Subcode=%xh\n",
8606 8490 cmd->SubCode);
8607 8491 break;
8608 8492 }
8609 8493
8610 8494 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8611 8495 }
8612 8496
8613 8497 /*
8614 8498 * ql_qry_vport
8615 8499 * Performs EXT_VF_SC_VPORT_GETINFO subfunction.
8616 8500 *
8617 8501 * Input:
8618 8502 * ha: adapter state pointer.
8619 8503 * cmd: EXT_IOCTL cmd struct pointer.
8620 8504 * mode: flags.
8621 8505 *
8622 8506 * Returns:
8623 8507 * None, request status indicated in cmd->Status.
8624 8508 *
8625 8509 * Context:
8626 8510 * Kernel context.
8627 8511 */
8628 8512 static void
8629 8513 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
8630 8514 {
8631 8515 ql_adapter_state_t *tmp_vha;
8632 8516 EXT_VPORT_INFO tmp_vport = {0};
8633 8517 int max_vport;
8634 8518
8635 8519 QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance);
8636 8520
8637 8521 if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
8638 8522 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8639 8523 cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
8640 8524 EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
8641 8525 cmd->ResponseLen);
8642 8526 cmd->ResponseLen = 0;
8643 8527 return;
8644 8528 }
8645 8529
8646 8530 /* Fill in the vport information. */
8647 8531 bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
8648 8532 EXT_DEF_WWN_NAME_SIZE);
8649 8533 bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
8650 8534 EXT_DEF_WWN_NAME_SIZE);
8651 8535 tmp_vport.state = vha->state;
8652 8536 tmp_vport.id = vha->vp_index;
8653 8537
8654 8538 tmp_vha = vha->pha->vp_next;
8655 8539 while (tmp_vha != NULL) {
8656 8540 tmp_vport.used++;
8657 8541 tmp_vha = tmp_vha->vp_next;
8658 8542 }
8659 8543
8660 8544 max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
8661 8545 MAX_25_VIRTUAL_PORTS);
8662 8546 if (max_vport > tmp_vport.used) {
8663 8547 tmp_vport.free = max_vport - tmp_vport.used;
8664 8548 }
8665 8549
8666 8550 if (ddi_copyout((void *)&tmp_vport,
8667 8551 (void *)(uintptr_t)(cmd->ResponseAdr),
8668 8552 sizeof (EXT_VPORT_INFO), mode) != 0) {
8669 8553 cmd->Status = EXT_STATUS_COPY_ERR;
8670 8554 cmd->ResponseLen = 0;
8671 8555 EL(vha, "failed, ddi_copyout\n");
8672 8556 } else {
8673 8557 cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
8674 8558 QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance);
8675 8559 }
8676 8560 }
8677 8561
8678 8562 /*
8679 8563 * ql_access_flash
8680 8564 * Performs all EXT_CC_ACCESS_FLASH_OS functions.
8681 8565 *
8682 8566 * Input:
8683 8567 * pi: port info pointer.
8684 8568 * cmd: Local EXT_IOCTL cmd struct pointer.
8685 8569 * mode: flags.
8686 8570 *
8687 8571 * Returns:
8688 8572 * None, request status indicated in cmd->Status.
8689 8573 *
8690 8574 * Context:
8691 8575 * Kernel context.
8692 8576 */
8693 8577 static void
8694 8578 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8695 8579 {
8696 8580 int rval;
8697 8581
8698 8582 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8699 8583
8700 8584 switch (cmd->SubCode) {
8701 8585 case EXT_SC_FLASH_READ:
8702 8586 if ((rval = ql_flash_fcode_dump(ha,
8703 8587 (void *)(uintptr_t)(cmd->ResponseAdr),
8704 8588 (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
8705 8589 cmd->Status = EXT_STATUS_COPY_ERR;
8706 8590 cmd->ResponseLen = 0;
8707 8591 EL(ha, "flash_fcode_dump status=%xh\n", rval);
8708 8592 }
8709 8593 break;
8710 8594 case EXT_SC_FLASH_WRITE:
8711 8595 if ((rval = ql_r_m_w_flash(ha,
8712 8596 (void *)(uintptr_t)(cmd->RequestAdr),
8713 8597 (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
8714 8598 QL_SUCCESS) {
8715 8599 cmd->Status = EXT_STATUS_COPY_ERR;
8716 8600 cmd->ResponseLen = 0;
8717 8601 EL(ha, "r_m_w_flash status=%xh\n", rval);
8718 8602 } else {
8719 8603 /* Reset caches on all adapter instances. */
8720 8604 ql_update_flash_caches(ha);
8721 8605 }
8722 8606 break;
8723 8607 default:
8724 8608 EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8725 8609 cmd->Status = EXT_STATUS_ERR;
8726 8610 cmd->ResponseLen = 0;
8727 8611 break;
8728 8612 }
8729 8613
8730 8614 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8731 8615 }
8732 8616
8733 8617 /*
8734 8618 * ql_reset_cmd
8735 8619 * Performs all EXT_CC_RESET_FW_OS functions.
8736 8620 *
8737 8621 * Input:
8738 8622 * ha: adapter state pointer.
8739 8623 * cmd: Local EXT_IOCTL cmd struct pointer.
8740 8624 *
8741 8625 * Returns:
8742 8626 * None, request status indicated in cmd->Status.
8743 8627 *
8744 8628 * Context:
8745 8629 * Kernel context.
8746 8630 */
8747 8631 static void
8748 8632 ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
8749 8633 {
8750 8634 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8751 8635
8752 8636 switch (cmd->SubCode) {
8753 8637 case EXT_SC_RESET_FC_FW:
8754 8638 EL(ha, "isp_abort_needed\n");
8755 8639 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
8756 8640 break;
8757 8641 case EXT_SC_RESET_MPI_FW:
8758 8642 if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8759 8643 EL(ha, "invalid request for HBA\n");
8760 8644 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8761 8645 cmd->ResponseLen = 0;
8762 8646 } else {
8763 8647 /* Wait for I/O to stop and daemon to stall. */
8764 8648 if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8765 8649 EL(ha, "ql_suspend_hba failed\n");
8766 8650 cmd->Status = EXT_STATUS_BUSY;
8767 8651 cmd->ResponseLen = 0;
8768 8652 } else if (ql_restart_mpi(ha) != QL_SUCCESS) {
8769 8653 cmd->Status = EXT_STATUS_ERR;
8770 8654 cmd->ResponseLen = 0;
8771 8655 } else {
8772 8656 uint8_t timer;
8773 8657 /*
8774 8658 * While the restart_mpi mailbox cmd may be
8775 8659 * done the MPI is not. Wait at least 6 sec. or
8776 8660 * exit if the loop comes up.
8777 8661 */
8778 8662 for (timer = 6; timer; timer--) {
8779 8663 if (!(ha->task_daemon_flags &
8780 8664 LOOP_DOWN)) {
8781 8665 break;
8782 8666 }
8783 8667 /* Delay for 1 second. */
8784 8668 ql_delay(ha, 1000000);
8785 8669 }
8786 8670 }
8787 8671 ql_restart_hba(ha);
8788 8672 }
8789 8673 break;
8790 8674 default:
8791 8675 EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8792 8676 cmd->Status = EXT_STATUS_ERR;
8793 8677 cmd->ResponseLen = 0;
8794 8678 break;
8795 8679 }
8796 8680
8797 8681 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8798 8682 }
8799 8683
8800 8684 /*
8801 8685 * ql_get_dcbx_parameters
8802 8686 * Get DCBX parameters.
8803 8687 *
8804 8688 * Input:
8805 8689 * ha: adapter state pointer.
8806 8690 * cmd: User space CT arguments pointer.
8807 8691 * mode: flags.
8808 8692 */
8809 8693 static void
8810 8694 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8811 8695 {
8812 8696 uint8_t *tmp_buf;
8813 8697 int rval;
8814 8698
8815 8699 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
↓ open down ↓ |
469 lines elided |
↑ open up ↑ |
8816 8700
8817 8701 if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8818 8702 EL(ha, "invalid request for HBA\n");
8819 8703 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8820 8704 cmd->ResponseLen = 0;
8821 8705 return;
8822 8706 }
8823 8707
8824 8708 /* Allocate memory for command. */
8825 8709 tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP);
8826 - if (tmp_buf == NULL) {
8827 - EL(ha, "failed, kmem_zalloc\n");
8828 - cmd->Status = EXT_STATUS_NO_MEMORY;
8829 - cmd->ResponseLen = 0;
8830 - return;
8831 - }
8832 8710 /* Send command */
8833 8711 rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE,
8834 8712 (caddr_t)tmp_buf);
8835 8713 if (rval != QL_SUCCESS) {
8836 8714 /* error */
8837 8715 EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval);
8838 8716 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8839 8717 cmd->Status = EXT_STATUS_ERR;
8840 8718 cmd->ResponseLen = 0;
8841 8719 return;
8842 8720 }
8843 8721
8844 8722 /* Copy the response */
8845 8723 if (ql_send_buffer_data((caddr_t)tmp_buf,
8846 8724 (caddr_t)(uintptr_t)cmd->ResponseAdr,
8847 8725 EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) {
8848 8726 EL(ha, "failed, ddi_copyout\n");
8849 8727 cmd->Status = EXT_STATUS_COPY_ERR;
8850 8728 cmd->ResponseLen = 0;
8851 8729 } else {
8852 8730 cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE;
8853 8731 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8854 8732 }
8855 8733 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8856 8734
8857 8735 }
8858 8736
8859 8737 /*
8860 8738 * ql_qry_cna_port
8861 8739 * Performs EXT_SC_QUERY_CNA_PORT subfunction.
8862 8740 *
8863 8741 * Input:
8864 8742 * ha: adapter state pointer.
8865 8743 * cmd: EXT_IOCTL cmd struct pointer.
8866 8744 * mode: flags.
8867 8745 *
8868 8746 * Returns:
8869 8747 * None, request status indicated in cmd->Status.
8870 8748 *
8871 8749 * Context:
8872 8750 * Kernel context.
8873 8751 */
8874 8752 static void
8875 8753 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8876 8754 {
8877 8755 EXT_CNA_PORT cna_port = {0};
8878 8756
8879 8757 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8880 8758
8881 8759 if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8882 8760 EL(ha, "invalid request for HBA\n");
8883 8761 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8884 8762 cmd->ResponseLen = 0;
8885 8763 return;
8886 8764 }
8887 8765
8888 8766 if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) {
8889 8767 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8890 8768 cmd->DetailStatus = sizeof (EXT_CNA_PORT);
8891 8769 EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n",
8892 8770 cmd->ResponseLen);
8893 8771 cmd->ResponseLen = 0;
8894 8772 return;
8895 8773 }
8896 8774
8897 8775 cna_port.VLanId = ha->fcoe_vlan_id;
8898 8776 cna_port.FabricParam = ha->fabric_params;
8899 8777 bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress,
8900 8778 EXT_DEF_MAC_ADDRESS_SIZE);
8901 8779
8902 8780 if (ddi_copyout((void *)&cna_port,
8903 8781 (void *)(uintptr_t)(cmd->ResponseAdr),
8904 8782 sizeof (EXT_CNA_PORT), mode) != 0) {
8905 8783 cmd->Status = EXT_STATUS_COPY_ERR;
8906 8784 cmd->ResponseLen = 0;
8907 8785 EL(ha, "failed, ddi_copyout\n");
8908 8786 } else {
8909 8787 cmd->ResponseLen = sizeof (EXT_CNA_PORT);
8910 8788 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8911 8789 }
8912 8790 }
8913 8791
8914 8792 /*
8915 8793 * ql_qry_adapter_versions
8916 8794 * Performs EXT_SC_QUERY_ADAPTER_VERSIONS subfunction.
8917 8795 *
8918 8796 * Input:
8919 8797 * ha: adapter state pointer.
8920 8798 * cmd: EXT_IOCTL cmd struct pointer.
8921 8799 * mode: flags.
8922 8800 *
8923 8801 * Returns:
8924 8802 * None, request status indicated in cmd->Status.
8925 8803 *
8926 8804 * Context:
8927 8805 * Kernel context.
8928 8806 */
8929 8807 static void
8930 8808 ql_qry_adapter_versions(ql_adapter_state_t *ha, EXT_IOCTL *cmd,
8931 8809 int mode)
8932 8810 {
8933 8811 uint8_t is_8142, mpi_cap;
8934 8812 uint32_t ver_len, transfer_size;
8935 8813 PEXT_ADAPTERREGIONVERSION padapter_ver = NULL;
8936 8814
8937 8815 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8938 8816
8939 8817 /* 8142s do not have a EDC PHY firmware. */
8940 8818 mpi_cap = (uint8_t)(ha->mpi_capability_list >> 8);
8941 8819
8942 8820 is_8142 = 0;
8943 8821 /* Sizeof (Length + Reserved) = 8 Bytes */
8944 8822 if (mpi_cap == 0x02 || mpi_cap == 0x04) {
8945 8823 ver_len = (sizeof (EXT_REGIONVERSION) * (NO_OF_VERSIONS - 1))
8946 8824 + 8;
8947 8825 is_8142 = 1;
8948 8826 } else {
8949 8827 ver_len = (sizeof (EXT_REGIONVERSION) * NO_OF_VERSIONS) + 8;
8950 8828 }
8951 8829
8952 8830 /* Allocate local memory for EXT_ADAPTERREGIONVERSION */
8953 8831 padapter_ver = (EXT_ADAPTERREGIONVERSION *)kmem_zalloc(ver_len,
8954 8832 KM_SLEEP);
8955 8833
8956 8834 if (padapter_ver == NULL) {
8957 8835 EL(ha, "failed, kmem_zalloc\n");
8958 8836 cmd->Status = EXT_STATUS_NO_MEMORY;
8959 8837 cmd->ResponseLen = 0;
8960 8838 return;
8961 8839 }
8962 8840
8963 8841 padapter_ver->Length = 1;
8964 8842 /* Copy MPI version */
8965 8843 padapter_ver->RegionVersion[0].Region =
8966 8844 EXT_OPT_ROM_REGION_MPI_RISC_FW;
8967 8845 padapter_ver->RegionVersion[0].Version[0] =
8968 8846 ha->mpi_fw_major_version;
8969 8847 padapter_ver->RegionVersion[0].Version[1] =
8970 8848 ha->mpi_fw_minor_version;
8971 8849 padapter_ver->RegionVersion[0].Version[2] =
8972 8850 ha->mpi_fw_subminor_version;
8973 8851 padapter_ver->RegionVersion[0].VersionLength = 3;
8974 8852 padapter_ver->RegionVersion[0].Location = RUNNING_VERSION;
8975 8853
8976 8854 if (!is_8142) {
8977 8855 padapter_ver->RegionVersion[1].Region =
8978 8856 EXT_OPT_ROM_REGION_EDC_PHY_FW;
8979 8857 padapter_ver->RegionVersion[1].Version[0] =
8980 8858 ha->phy_fw_major_version;
8981 8859 padapter_ver->RegionVersion[1].Version[1] =
8982 8860 ha->phy_fw_minor_version;
8983 8861 padapter_ver->RegionVersion[1].Version[2] =
8984 8862 ha->phy_fw_subminor_version;
8985 8863 padapter_ver->RegionVersion[1].VersionLength = 3;
8986 8864 padapter_ver->RegionVersion[1].Location = RUNNING_VERSION;
8987 8865 padapter_ver->Length = NO_OF_VERSIONS;
8988 8866 }
8989 8867
8990 8868 if (cmd->ResponseLen < ver_len) {
8991 8869 EL(ha, "failed, ResponseLen < ver_len, ",
8992 8870 "RespLen=%xh ver_len=%xh\n", cmd->ResponseLen, ver_len);
8993 8871 /* Calculate the No. of valid versions being returned. */
8994 8872 padapter_ver->Length = (uint32_t)
8995 8873 ((cmd->ResponseLen - 8) / sizeof (EXT_REGIONVERSION));
8996 8874 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8997 8875 cmd->DetailStatus = ver_len;
8998 8876 transfer_size = cmd->ResponseLen;
8999 8877 } else {
9000 8878 transfer_size = ver_len;
9001 8879 }
9002 8880
9003 8881 if (ddi_copyout((void *)padapter_ver,
9004 8882 (void *)(uintptr_t)(cmd->ResponseAdr),
9005 8883 transfer_size, mode) != 0) {
9006 8884 cmd->Status = EXT_STATUS_COPY_ERR;
9007 8885 cmd->ResponseLen = 0;
9008 8886 EL(ha, "failed, ddi_copyout\n");
9009 8887 } else {
9010 8888 cmd->ResponseLen = ver_len;
9011 8889 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9012 8890 }
9013 8891
9014 8892 kmem_free(padapter_ver, ver_len);
9015 8893 }
9016 8894
9017 8895 /*
9018 8896 * ql_get_xgmac_statistics
9019 8897 * Get XgMac information
9020 8898 *
9021 8899 * Input:
9022 8900 * ha: adapter state pointer.
9023 8901 * cmd: EXT_IOCTL cmd struct pointer.
9024 8902 * mode: flags.
9025 8903 *
9026 8904 * Returns:
9027 8905 * None, request status indicated in cmd->Status.
9028 8906 *
9029 8907 * Context:
9030 8908 * Kernel context.
9031 8909 */
9032 8910 static void
9033 8911 ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9034 8912 {
9035 8913 int rval;
9036 8914 uint32_t size;
9037 8915 int8_t *tmp_buf;
9038 8916 EXT_MENLO_MANAGE_INFO info;
9039 8917
9040 8918 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9041 8919
9042 8920 /* Verify the size of request structure. */
9043 8921 if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
9044 8922 /* Return error */
9045 8923 EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
9046 8924 sizeof (EXT_MENLO_MANAGE_INFO));
9047 8925 cmd->Status = EXT_STATUS_INVALID_PARAM;
9048 8926 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
9049 8927 cmd->ResponseLen = 0;
9050 8928 return;
9051 8929 }
9052 8930
9053 8931 /* Get manage info request. */
9054 8932 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
9055 8933 (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
9056 8934 EL(ha, "failed, ddi_copyin\n");
9057 8935 cmd->Status = EXT_STATUS_COPY_ERR;
9058 8936 cmd->ResponseLen = 0;
9059 8937 return;
9060 8938 }
9061 8939
9062 8940 size = info.TotalByteCount;
9063 8941 if (!size) {
↓ open down ↓ |
222 lines elided |
↑ open up ↑ |
9064 8942 /* parameter error */
9065 8943 cmd->Status = EXT_STATUS_INVALID_PARAM;
9066 8944 cmd->DetailStatus = 0;
9067 8945 EL(ha, "failed, size=%xh\n", size);
9068 8946 cmd->ResponseLen = 0;
9069 8947 return;
9070 8948 }
9071 8949
9072 8950 /* Allocate memory for command. */
9073 8951 tmp_buf = kmem_zalloc(size, KM_SLEEP);
9074 - if (tmp_buf == NULL) {
9075 - EL(ha, "failed, kmem_zalloc\n");
9076 - cmd->Status = EXT_STATUS_NO_MEMORY;
9077 - cmd->ResponseLen = 0;
9078 - return;
9079 - }
9080 8952
9081 8953 if (!(info.Operation & MENLO_OP_GET_INFO)) {
9082 8954 EL(ha, "Invalid request for 81XX\n");
9083 8955 kmem_free(tmp_buf, size);
9084 8956 cmd->Status = EXT_STATUS_ERR;
9085 8957 cmd->ResponseLen = 0;
9086 8958 return;
9087 8959 }
9088 8960
9089 8961 rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf);
9090 8962
9091 8963 if (rval != QL_SUCCESS) {
9092 8964 /* error */
9093 8965 EL(ha, "failed, get_xgmac_stats =%xh\n", rval);
9094 8966 kmem_free(tmp_buf, size);
9095 8967 cmd->Status = EXT_STATUS_ERR;
9096 8968 cmd->ResponseLen = 0;
9097 8969 return;
9098 8970 }
9099 8971
9100 8972 if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes,
9101 8973 size, mode) != size) {
9102 8974 EL(ha, "failed, ddi_copyout\n");
9103 8975 cmd->Status = EXT_STATUS_COPY_ERR;
9104 8976 cmd->ResponseLen = 0;
9105 8977 } else {
9106 8978 cmd->ResponseLen = info.TotalByteCount;
9107 8979 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9108 8980 }
9109 8981 kmem_free(tmp_buf, size);
9110 8982 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9111 8983 }
9112 8984
9113 8985 /*
9114 8986 * ql_get_fcf_list
9115 8987 * Get FCF list.
9116 8988 *
9117 8989 * Input:
9118 8990 * ha: adapter state pointer.
9119 8991 * cmd: User space CT arguments pointer.
9120 8992 * mode: flags.
9121 8993 */
9122 8994 static void
9123 8995 ql_get_fcf_list(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9124 8996 {
9125 8997 uint8_t *tmp_buf;
9126 8998 int rval;
9127 8999 EXT_FCF_LIST fcf_list = {0};
9128 9000 ql_fcf_list_desc_t mb_fcf_list = {0};
9129 9001
9130 9002 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9131 9003
9132 9004 if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
9133 9005 EL(ha, "invalid request for HBA\n");
9134 9006 cmd->Status = EXT_STATUS_INVALID_REQUEST;
9135 9007 cmd->ResponseLen = 0;
9136 9008 return;
9137 9009 }
9138 9010 /* Get manage info request. */
9139 9011 if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
9140 9012 (caddr_t)&fcf_list, sizeof (EXT_FCF_LIST), mode) != 0) {
9141 9013 EL(ha, "failed, ddi_copyin\n");
9142 9014 cmd->Status = EXT_STATUS_COPY_ERR;
9143 9015 cmd->ResponseLen = 0;
9144 9016 return;
9145 9017 }
9146 9018
↓ open down ↓ |
57 lines elided |
↑ open up ↑ |
9147 9019 if (!(fcf_list.BufSize)) {
9148 9020 /* Return error */
9149 9021 EL(ha, "failed, fcf_list BufSize is=%xh\n",
9150 9022 fcf_list.BufSize);
9151 9023 cmd->Status = EXT_STATUS_INVALID_PARAM;
9152 9024 cmd->ResponseLen = 0;
9153 9025 return;
9154 9026 }
9155 9027 /* Allocate memory for command. */
9156 9028 tmp_buf = kmem_zalloc(fcf_list.BufSize, KM_SLEEP);
9157 - if (tmp_buf == NULL) {
9158 - EL(ha, "failed, kmem_zalloc\n");
9159 - cmd->Status = EXT_STATUS_NO_MEMORY;
9160 - cmd->ResponseLen = 0;
9161 - return;
9162 - }
9163 9029 /* build the descriptor */
9164 9030 if (fcf_list.Options) {
9165 9031 mb_fcf_list.options = FCF_LIST_RETURN_ONE;
9166 9032 } else {
9167 9033 mb_fcf_list.options = FCF_LIST_RETURN_ALL;
9168 9034 }
9169 9035 mb_fcf_list.fcf_index = (uint16_t)fcf_list.FcfIndex;
9170 9036 mb_fcf_list.buffer_size = fcf_list.BufSize;
9171 9037
9172 9038 /* Send command */
9173 9039 rval = ql_get_fcf_list_mbx(ha, &mb_fcf_list, (caddr_t)tmp_buf);
9174 9040 if (rval != QL_SUCCESS) {
9175 9041 /* error */
9176 9042 EL(ha, "failed, get_fcf_list_mbx=%xh\n", rval);
9177 9043 kmem_free(tmp_buf, fcf_list.BufSize);
9178 9044 cmd->Status = EXT_STATUS_ERR;
9179 9045 cmd->ResponseLen = 0;
9180 9046 return;
9181 9047 }
9182 9048
9183 9049 /* Copy the response */
9184 9050 if (ql_send_buffer_data((caddr_t)tmp_buf,
9185 9051 (caddr_t)(uintptr_t)cmd->ResponseAdr,
9186 9052 fcf_list.BufSize, mode) != fcf_list.BufSize) {
9187 9053 EL(ha, "failed, ddi_copyout\n");
9188 9054 cmd->Status = EXT_STATUS_COPY_ERR;
9189 9055 cmd->ResponseLen = 0;
9190 9056 } else {
9191 9057 cmd->ResponseLen = mb_fcf_list.buffer_size;
9192 9058 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9193 9059 }
9194 9060
9195 9061 kmem_free(tmp_buf, fcf_list.BufSize);
9196 9062 }
9197 9063
9198 9064 /*
9199 9065 * ql_get_resource_counts
9200 9066 * Get Resource counts:
9201 9067 *
9202 9068 * Input:
9203 9069 * ha: adapter state pointer.
9204 9070 * cmd: User space CT arguments pointer.
9205 9071 * mode: flags.
9206 9072 */
9207 9073 static void
9208 9074 ql_get_resource_counts(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9209 9075 {
9210 9076 int rval;
9211 9077 ql_mbx_data_t mr;
9212 9078 EXT_RESOURCE_CNTS tmp_rc_cnt = {0};
9213 9079
9214 9080 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9215 9081
9216 9082 if (!(CFG_IST(ha, CFG_CTRL_242581))) {
9217 9083 EL(ha, "invalid request for HBA\n");
9218 9084 cmd->Status = EXT_STATUS_INVALID_REQUEST;
9219 9085 cmd->ResponseLen = 0;
9220 9086 return;
9221 9087 }
9222 9088
9223 9089 if (cmd->ResponseLen < sizeof (EXT_RESOURCE_CNTS)) {
9224 9090 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9225 9091 cmd->DetailStatus = sizeof (EXT_RESOURCE_CNTS);
9226 9092 EL(ha, "failed, ResponseLen < EXT_RESOURCE_CNTS, "
9227 9093 "Len=%xh\n", cmd->ResponseLen);
9228 9094 cmd->ResponseLen = 0;
9229 9095 return;
9230 9096 }
9231 9097
9232 9098 rval = ql_get_resource_cnts(ha, &mr);
9233 9099 if (rval != QL_SUCCESS) {
9234 9100 EL(ha, "resource cnt mbx failed\n");
9235 9101 cmd->Status = EXT_STATUS_ERR;
9236 9102 cmd->ResponseLen = 0;
9237 9103 return;
9238 9104 }
9239 9105
9240 9106 tmp_rc_cnt.OrgTgtXchgCtrlCnt = (uint32_t)mr.mb[1];
9241 9107 tmp_rc_cnt.CurTgtXchgCtrlCnt = (uint32_t)mr.mb[2];
9242 9108 tmp_rc_cnt.CurXchgCtrlCnt = (uint32_t)mr.mb[3];
9243 9109 tmp_rc_cnt.OrgXchgCtrlCnt = (uint32_t)mr.mb[6];
9244 9110 tmp_rc_cnt.CurIocbBufCnt = (uint32_t)mr.mb[7];
9245 9111 tmp_rc_cnt.OrgIocbBufCnt = (uint32_t)mr.mb[10];
9246 9112 tmp_rc_cnt.NoOfSupVPs = (uint32_t)mr.mb[11];
9247 9113 tmp_rc_cnt.NoOfSupFCFs = (uint32_t)mr.mb[12];
9248 9114
9249 9115 rval = ddi_copyout((void *)&tmp_rc_cnt,
9250 9116 (void *)(uintptr_t)(cmd->ResponseAdr),
9251 9117 sizeof (EXT_RESOURCE_CNTS), mode);
9252 9118 if (rval != 0) {
9253 9119 cmd->Status = EXT_STATUS_COPY_ERR;
9254 9120 cmd->ResponseLen = 0;
9255 9121 EL(ha, "failed, ddi_copyout\n");
9256 9122 } else {
9257 9123 cmd->ResponseLen = sizeof (EXT_RESOURCE_CNTS);
9258 9124 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9259 9125 }
9260 9126 }
↓ open down ↓ |
88 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX