5042 stop using deprecated atomic functions
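This webrev's single substantive change is in hxge_txdma_reclaim(): the
deprecated cas32() call is replaced by atomic_cas_32() from
<sys/atomic.h>.  The two routines take the same (target, cmp, newval)
arguments and return the value the target held before the operation, so
the conversion is mechanical.  The sketch below illustrates the pattern
only and is not part of the change; clear_queueing_flag() is a
hypothetical helper, while the 0/1 queueing-flag semantics come from the
diff itself.

	#include <sys/types.h>
	#include <sys/atomic.h>		/* atomic_cas_32() */

	/*
	 * Atomically clear a 0/1 "queueing" flag.  atomic_cas_32()
	 * stores 0 in *flag only if *flag still equals 1, and it
	 * returns the value *flag held before the operation.  The
	 * deprecated call had the same shape:
	 *
	 *	(void) cas32(flag, 1, 0);		old, deprecated
	 *	(void) atomic_cas_32(flag, 1, 0);	replacement
	 */
	static void
	clear_queueing_flag(volatile uint32_t *flag)
	{
		uint32_t old = atomic_cas_32(flag, 1, 0);

		if (old == 1) {
			/* This caller performed the 1 -> 0 transition. */
		}
	}

In the driver the return value is discarded with (void), as before,
because hxge_txdma_reclaim() does not care whether the flag was already
clear; the (uint32_t *) cast on &tx_ring_p->queueing is carried over
unchanged from the old cas32() call.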
--- old/usr/src/uts/common/io/hxge/hxge_txdma.c
+++ new/usr/src/uts/common/io/hxge/hxge_txdma.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #include <hxge_impl.h>
27 27 #include <hxge_txdma.h>
28 28 #include <sys/llc1.h>
29 29
30 30 uint32_t hxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
31 31 uint32_t hxge_tx_minfree = 64;
32 32 uint32_t hxge_tx_intr_thres = 0;
33 33 uint32_t hxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
34 34 uint32_t hxge_tx_tiny_pack = 1;
35 35 uint32_t hxge_tx_use_bcopy = 1;
36 36
37 37 extern uint32_t hxge_tx_ring_size;
38 38 extern uint32_t hxge_bcopy_thresh;
39 39 extern uint32_t hxge_dvma_thresh;
40 40 extern uint32_t hxge_dma_stream_thresh;
41 41 extern dma_method_t hxge_force_dma;
42 42
43 43 /* Device register access attributes for PIO. */
44 44 extern ddi_device_acc_attr_t hxge_dev_reg_acc_attr;
45 45
46 46 /* Device descriptor access attributes for DMA. */
47 47 extern ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr;
48 48
49 49 /* Device buffer access attributes for DMA. */
50 50 extern ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr;
51 51 extern ddi_dma_attr_t hxge_desc_dma_attr;
52 52 extern ddi_dma_attr_t hxge_tx_dma_attr;
53 53
54 54 static hxge_status_t hxge_map_txdma(p_hxge_t hxgep);
55 55 static void hxge_unmap_txdma(p_hxge_t hxgep);
56 56 static hxge_status_t hxge_txdma_hw_start(p_hxge_t hxgep);
57 57 static void hxge_txdma_hw_stop(p_hxge_t hxgep);
58 58
59 59 static hxge_status_t hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
60 60 p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
61 61 uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
62 62 p_tx_mbox_t *tx_mbox_p);
63 63 static void hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
64 64 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
65 65 static hxge_status_t hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t,
66 66 p_hxge_dma_common_t *, p_tx_ring_t *, uint32_t);
67 67 static void hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep,
68 68 p_tx_ring_t tx_ring_p);
69 69 static void hxge_map_txdma_channel_cfg_ring(p_hxge_t, uint16_t,
70 70 p_hxge_dma_common_t *, p_tx_ring_t, p_tx_mbox_t *);
71 71 static void hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
72 72 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
73 73 static hxge_status_t hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
74 74 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
75 75 static hxge_status_t hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
76 76 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
77 77 static p_tx_ring_t hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel);
78 78 static hxge_status_t hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index,
79 79 p_hxge_ldv_t ldvp, tdc_stat_t cs);
80 80 static p_tx_mbox_t hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel);
81 81 static hxge_status_t hxge_txdma_fatal_err_recover(p_hxge_t hxgep,
82 82 uint16_t channel, p_tx_ring_t tx_ring_p);
83 83 static hxge_status_t hxge_tx_port_fatal_err_recover(p_hxge_t hxgep);
84 84
85 85 hxge_status_t
86 86 hxge_init_txdma_channels(p_hxge_t hxgep)
87 87 {
88 88 hxge_status_t status = HXGE_OK;
89 89 block_reset_t reset_reg;
90 90
91 91 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_init_txdma_channels"));
92 92
93 93 /*
94 94 	 * Reset the TDC block from the PEU to clean up any unknown
95 95 	 * configuration that may be left over from a previous reboot.
96 96 */
97 97 reset_reg.value = 0;
98 98 reset_reg.bits.tdc_rst = 1;
99 99 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
100 100
101 101 HXGE_DELAY(1000);
102 102
103 103 status = hxge_map_txdma(hxgep);
104 104 if (status != HXGE_OK) {
105 105 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
106 106 "<== hxge_init_txdma_channels: status 0x%x", status));
107 107 return (status);
108 108 }
109 109
110 110 status = hxge_txdma_hw_start(hxgep);
111 111 if (status != HXGE_OK) {
112 112 hxge_unmap_txdma(hxgep);
113 113 return (status);
114 114 }
115 115
116 116 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
117 117 "<== hxge_init_txdma_channels: status 0x%x", status));
118 118
119 119 return (HXGE_OK);
120 120 }
121 121
122 122 void
123 123 hxge_uninit_txdma_channels(p_hxge_t hxgep)
124 124 {
125 125 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_uninit_txdma_channels"));
126 126
127 127 hxge_txdma_hw_stop(hxgep);
128 128 hxge_unmap_txdma(hxgep);
129 129
130 130 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_uninit_txdma_channels"));
131 131 }
132 132
133 133 void
134 134 hxge_setup_dma_common(p_hxge_dma_common_t dest_p, p_hxge_dma_common_t src_p,
135 135 uint32_t entries, uint32_t size)
136 136 {
137 137 size_t tsize;
138 138 *dest_p = *src_p;
139 139 tsize = size * entries;
140 140 dest_p->alength = tsize;
141 141 dest_p->nblocks = entries;
142 142 dest_p->block_size = size;
143 143 dest_p->offset += tsize;
144 144
145 145 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
146 146 src_p->alength -= tsize;
147 147 src_p->dma_cookie.dmac_laddress += tsize;
148 148 src_p->dma_cookie.dmac_size -= tsize;
149 149 }
150 150
151 151 hxge_status_t
152 152 hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel, uint64_t reg_data)
153 153 {
154 154 hpi_status_t rs = HPI_SUCCESS;
155 155 hxge_status_t status = HXGE_OK;
156 156 hpi_handle_t handle;
157 157
158 158 HXGE_DEBUG_MSG((hxgep, TX_CTL, " ==> hxge_reset_txdma_channel"));
159 159
160 160 handle = HXGE_DEV_HPI_HANDLE(hxgep);
161 161 if ((reg_data & TDC_TDR_RST_MASK) == TDC_TDR_RST_MASK) {
162 162 rs = hpi_txdma_channel_reset(handle, channel);
163 163 } else {
164 164 rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel);
165 165 }
166 166
167 167 if (rs != HPI_SUCCESS) {
168 168 status = HXGE_ERROR | rs;
169 169 }
170 170
171 171 /*
172 172 	 * Reset the tail (kick) register to 0.  Hardware will not reset it; a
173 173 	 * Tx overflow fatal error occurs if the tail is not 0 after reset!
174 174 */
175 175 TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);
176 176
177 177 HXGE_DEBUG_MSG((hxgep, TX_CTL, " <== hxge_reset_txdma_channel"));
178 178
179 179 return (status);
180 180 }
181 181
182 182 hxge_status_t
183 183 hxge_init_txdma_channel_event_mask(p_hxge_t hxgep, uint16_t channel,
184 184 tdc_int_mask_t *mask_p)
185 185 {
186 186 hpi_handle_t handle;
187 187 hpi_status_t rs = HPI_SUCCESS;
188 188 hxge_status_t status = HXGE_OK;
189 189
190 190 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
191 191 	    "==> hxge_init_txdma_channel_event_mask"));
192 192
193 193 handle = HXGE_DEV_HPI_HANDLE(hxgep);
194 194
195 195 /*
196 196 	 * Mask off tx_rng_oflow since it is a false alarm.  The driver
197 197 	 * ensures it does not overflow the hardware and checks the
198 198 	 * hardware status.
199 199 */
200 200 mask_p->bits.tx_rng_oflow = 1;
201 201 rs = hpi_txdma_event_mask(handle, OP_SET, channel, mask_p);
202 202 if (rs != HPI_SUCCESS) {
203 203 status = HXGE_ERROR | rs;
204 204 }
205 205
206 206 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
207 207 	    "<== hxge_init_txdma_channel_event_mask"));
208 208 return (status);
209 209 }
210 210
211 211 hxge_status_t
212 212 hxge_enable_txdma_channel(p_hxge_t hxgep,
213 213 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
214 214 {
215 215 hpi_handle_t handle;
216 216 hpi_status_t rs = HPI_SUCCESS;
217 217 hxge_status_t status = HXGE_OK;
218 218
219 219 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_enable_txdma_channel"));
220 220
221 221 handle = HXGE_DEV_HPI_HANDLE(hxgep);
222 222 /*
223 223 * Use configuration data composed at init time. Write to hardware the
224 224 * transmit ring configurations.
225 225 */
226 226 rs = hpi_txdma_ring_config(handle, OP_SET, channel,
227 227 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
228 228
229 229 if (rs != HPI_SUCCESS) {
230 230 return (HXGE_ERROR | rs);
231 231 }
232 232
233 233 /* Write to hardware the mailbox */
234 234 rs = hpi_txdma_mbox_config(handle, OP_SET, channel,
235 235 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
236 236
237 237 if (rs != HPI_SUCCESS) {
238 238 return (HXGE_ERROR | rs);
239 239 }
240 240
241 241 /* Start the DMA engine. */
242 242 rs = hpi_txdma_channel_init_enable(handle, channel);
243 243 if (rs != HPI_SUCCESS) {
244 244 return (HXGE_ERROR | rs);
245 245 }
246 246 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_enable_txdma_channel"));
247 247 return (status);
248 248 }
249 249
250 250 void
251 251 hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum,
252 252 int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp)
253 253 {
254 254 p_tx_pkt_header_t hdrp;
255 255 p_mblk_t nmp;
256 256 uint64_t tmp;
257 257 size_t mblk_len;
258 258 size_t iph_len;
259 259 size_t hdrs_size;
260 260 uint8_t *ip_buf;
261 261 uint16_t eth_type;
262 262 uint8_t ipproto;
263 263 boolean_t is_vlan = B_FALSE;
264 264 size_t eth_hdr_size;
265 265 uint8_t hdrs_buf[sizeof (struct ether_header) + 64 + sizeof (uint32_t)];
266 266
267 267 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: mp $%p", mp));
268 268
269 269 /*
270 270 * Caller should zero out the headers first.
271 271 */
272 272 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
273 273
274 274 if (fill_len) {
275 275 HXGE_DEBUG_MSG((NULL, TX_CTL,
276 276 "==> hxge_fill_tx_hdr: pkt_len %d npads %d",
277 277 pkt_len, npads));
278 278 tmp = (uint64_t)pkt_len;
279 279 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
280 280
281 281 goto fill_tx_header_done;
282 282 }
283 283 tmp = (uint64_t)npads;
284 284 hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);
285 285
286 286 /*
287 287 * mp is the original data packet (does not include the Neptune
288 288 * transmit header).
289 289 */
290 290 nmp = mp;
291 291 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
292 292 HXGE_DEBUG_MSG((NULL, TX_CTL,
293 293 "==> hxge_fill_tx_hdr: mp $%p b_rptr $%p len %d",
294 294 mp, nmp->b_rptr, mblk_len));
295 295 ip_buf = NULL;
296 296 bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header));
297 297 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
298 298 HXGE_DEBUG_MSG((NULL, TX_CTL,
299 299 	    "==> hxge_fill_tx_hdr: ether type 0x%x (value 0x%llx)",
300 300 	    eth_type, hdrp->value));
301 301
302 302 if (eth_type < ETHERMTU) {
303 303 tmp = 1ull;
304 304 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
305 305 HXGE_DEBUG_MSG((NULL, TX_CTL,
306 306 "==> hxge_tx_pkt_hdr_init: LLC value 0x%llx", hdrp->value));
307 307 if (*(hdrs_buf + sizeof (struct ether_header)) ==
308 308 LLC_SNAP_SAP) {
309 309 eth_type = ntohs(*((uint16_t *)(hdrs_buf +
310 310 sizeof (struct ether_header) + 6)));
311 311 HXGE_DEBUG_MSG((NULL, TX_CTL,
312 312 "==> hxge_tx_pkt_hdr_init: LLC ether type 0x%x",
313 313 eth_type));
314 314 } else {
315 315 goto fill_tx_header_done;
316 316 }
317 317 } else if (eth_type == VLAN_ETHERTYPE) {
318 318 tmp = 1ull;
319 319 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
320 320
321 321 eth_type = ntohs(((struct ether_vlan_header *)
322 322 hdrs_buf)->ether_type);
323 323 is_vlan = B_TRUE;
324 324 HXGE_DEBUG_MSG((NULL, TX_CTL,
325 325 "==> hxge_tx_pkt_hdr_init: VLAN value 0x%llx",
326 326 hdrp->value));
327 327 }
328 328 if (!is_vlan) {
329 329 eth_hdr_size = sizeof (struct ether_header);
330 330 } else {
331 331 eth_hdr_size = sizeof (struct ether_vlan_header);
332 332 }
333 333
334 334 switch (eth_type) {
335 335 case ETHERTYPE_IP:
336 336 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
337 337 ip_buf = nmp->b_rptr + eth_hdr_size;
338 338 mblk_len -= eth_hdr_size;
339 339 iph_len = ((*ip_buf) & 0x0f);
340 340 if (mblk_len > (iph_len + sizeof (uint32_t))) {
341 341 ip_buf = nmp->b_rptr;
342 342 ip_buf += eth_hdr_size;
343 343 } else {
344 344 ip_buf = NULL;
345 345 }
346 346 }
347 347 if (ip_buf == NULL) {
348 348 hdrs_size = 0;
349 349 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
350 350 while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
351 351 mblk_len = (size_t)nmp->b_wptr -
352 352 (size_t)nmp->b_rptr;
353 353 if (mblk_len >=
354 354 (sizeof (hdrs_buf) - hdrs_size))
355 355 mblk_len = sizeof (hdrs_buf) -
356 356 hdrs_size;
357 357 bcopy(nmp->b_rptr,
358 358 &hdrs_buf[hdrs_size], mblk_len);
359 359 hdrs_size += mblk_len;
360 360 nmp = nmp->b_cont;
361 361 }
362 362 ip_buf = hdrs_buf;
363 363 ip_buf += eth_hdr_size;
364 364 iph_len = ((*ip_buf) & 0x0f);
365 365 }
366 366 ipproto = ip_buf[9];
367 367
368 368 tmp = (uint64_t)iph_len;
369 369 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
370 370 tmp = (uint64_t)(eth_hdr_size >> 1);
371 371 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
372 372
373 373 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv4 "
374 374 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
375 375 	    " tmp 0x%x", iph_len, hdrp->bits.l3start, eth_hdr_size,
376 376 ipproto, tmp));
377 377 HXGE_DEBUG_MSG((NULL, TX_CTL,
378 378 "==> hxge_tx_pkt_hdr_init: IP value 0x%llx", hdrp->value));
379 379 break;
380 380
381 381 case ETHERTYPE_IPV6:
382 382 hdrs_size = 0;
383 383 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
384 384 while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
385 385 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
386 386 if (mblk_len >= (sizeof (hdrs_buf) - hdrs_size))
387 387 mblk_len = sizeof (hdrs_buf) - hdrs_size;
388 388 bcopy(nmp->b_rptr, &hdrs_buf[hdrs_size], mblk_len);
389 389 hdrs_size += mblk_len;
390 390 nmp = nmp->b_cont;
391 391 }
392 392 ip_buf = hdrs_buf;
393 393 ip_buf += eth_hdr_size;
394 394
395 395 tmp = 1ull;
396 396 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
397 397
398 398 tmp = (eth_hdr_size >> 1);
399 399 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
400 400
401 401 /* byte 6 is the next header protocol */
402 402 ipproto = ip_buf[6];
403 403
404 404 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv6 "
405 405 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
406 406 iph_len, hdrp->bits.l3start, eth_hdr_size, ipproto));
407 407 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_hdr_init: IPv6 "
408 408 "value 0x%llx", hdrp->value));
409 409 break;
410 410
411 411 default:
412 412 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: non-IP"));
413 413 goto fill_tx_header_done;
414 414 }
415 415
416 416 switch (ipproto) {
417 417 case IPPROTO_TCP:
418 418 HXGE_DEBUG_MSG((NULL, TX_CTL,
419 419 "==> hxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
420 420 if (l4_cksum) {
421 421 tmp = 1ull;
422 422 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
423 423 HXGE_DEBUG_MSG((NULL, TX_CTL,
424 424 	    "==> hxge_tx_pkt_hdr_init: TCP CKSUM "
425 425 "value 0x%llx", hdrp->value));
426 426 }
427 427 HXGE_DEBUG_MSG((NULL, TX_CTL,
428 428 "==> hxge_tx_pkt_hdr_init: TCP value 0x%llx", hdrp->value));
429 429 break;
430 430
431 431 case IPPROTO_UDP:
432 432 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: UDP"));
433 433 if (l4_cksum) {
434 434 tmp = 0x2ull;
435 435 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
436 436 }
437 437 HXGE_DEBUG_MSG((NULL, TX_CTL,
438 438 "==> hxge_tx_pkt_hdr_init: UDP value 0x%llx",
439 439 hdrp->value));
440 440 break;
441 441
442 442 default:
443 443 goto fill_tx_header_done;
444 444 }
445 445
446 446 fill_tx_header_done:
447 447 HXGE_DEBUG_MSG((NULL, TX_CTL,
448 448 "==> hxge_fill_tx_hdr: pkt_len %d npads %d value 0x%llx",
449 449 pkt_len, npads, hdrp->value));
450 450 HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_fill_tx_hdr"));
451 451 }
452 452
453 453 /*ARGSUSED*/
454 454 p_mblk_t
455 455 hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
456 456 {
457 457 p_mblk_t newmp = NULL;
458 458
459 459 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
460 460 HXGE_DEBUG_MSG((NULL, TX_CTL,
461 461 "<== hxge_tx_pkt_header_reserve: allocb failed"));
462 462 return (NULL);
463 463 }
464 464 HXGE_DEBUG_MSG((NULL, TX_CTL,
465 465 "==> hxge_tx_pkt_header_reserve: get new mp"));
466 466 DB_TYPE(newmp) = M_DATA;
467 467 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
468 468 linkb(newmp, mp);
469 469 newmp->b_rptr -= TX_PKT_HEADER_SIZE;
470 470
471 471 HXGE_DEBUG_MSG((NULL, TX_CTL,
472 472 "==>hxge_tx_pkt_header_reserve: b_rptr $%p b_wptr $%p",
473 473 newmp->b_rptr, newmp->b_wptr));
474 474 HXGE_DEBUG_MSG((NULL, TX_CTL,
475 475 "<== hxge_tx_pkt_header_reserve: use new mp"));
476 476 return (newmp);
477 477 }
478 478
479 479 int
480 480 hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
481 481 {
482 482 uint_t nmblks;
483 483 ssize_t len;
484 484 uint_t pkt_len;
485 485 p_mblk_t nmp, bmp, tmp;
486 486 uint8_t *b_wptr;
487 487
488 488 HXGE_DEBUG_MSG((NULL, TX_CTL,
489 489 "==> hxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p len %d",
490 490 mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
491 491
492 492 nmp = mp;
493 493 bmp = mp;
494 494 nmblks = 0;
495 495 pkt_len = 0;
496 496 *tot_xfer_len_p = 0;
497 497
498 498 while (nmp) {
499 499 len = MBLKL(nmp);
500 500 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
501 501 "len %d pkt_len %d nmblks %d tot_xfer_len %d",
502 502 len, pkt_len, nmblks, *tot_xfer_len_p));
503 503
504 504 if (len <= 0) {
505 505 bmp = nmp;
506 506 nmp = nmp->b_cont;
507 507 HXGE_DEBUG_MSG((NULL, TX_CTL,
508 508 "==> hxge_tx_pkt_nmblocks:"
509 509 " len (0) pkt_len %d nmblks %d", pkt_len, nmblks));
510 510 continue;
511 511 }
512 512 *tot_xfer_len_p += len;
513 513 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
514 514 "len %d pkt_len %d nmblks %d tot_xfer_len %d",
515 515 len, pkt_len, nmblks, *tot_xfer_len_p));
516 516
517 517 if (len < hxge_bcopy_thresh) {
518 518 HXGE_DEBUG_MSG((NULL, TX_CTL,
519 519 "==> hxge_tx_pkt_nmblocks: "
520 520 "len %d (< thresh) pkt_len %d nmblks %d",
521 521 len, pkt_len, nmblks));
522 522 if (pkt_len == 0)
523 523 nmblks++;
524 524 pkt_len += len;
525 525 if (pkt_len >= hxge_bcopy_thresh) {
526 526 pkt_len = 0;
527 527 len = 0;
528 528 nmp = bmp;
529 529 }
530 530 } else {
531 531 HXGE_DEBUG_MSG((NULL, TX_CTL,
532 532 "==> hxge_tx_pkt_nmblocks: "
533 533 "len %d (> thresh) pkt_len %d nmblks %d",
534 534 len, pkt_len, nmblks));
535 535 pkt_len = 0;
536 536 nmblks++;
537 537 /*
538 538 * Hardware limits the transfer length to 4K. If len is
539 539 			 * more than 4K, we need to break it up into at most 2
540 540 * more blocks.
541 541 */
542 542 if (len > TX_MAX_TRANSFER_LENGTH) {
543 543 uint32_t nsegs;
544 544
545 545 				nsegs = 1;
546 546 				HXGE_DEBUG_MSG((NULL, TX_CTL,
547 547 				    "==> hxge_tx_pkt_nmblocks: "
548 548 				    "len %d pkt_len %d nmblks %d nsegs %d",
549 549 				    len, pkt_len, nmblks, nsegs));
550 550 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
551 551 ++nsegs;
552 552 }
553 553 do {
554 554 b_wptr = nmp->b_rptr +
555 555 TX_MAX_TRANSFER_LENGTH;
556 556 nmp->b_wptr = b_wptr;
557 557 if ((tmp = dupb(nmp)) == NULL) {
558 558 return (0);
559 559 }
560 560 tmp->b_rptr = b_wptr;
561 561 tmp->b_wptr = nmp->b_wptr;
562 562 tmp->b_cont = nmp->b_cont;
563 563 nmp->b_cont = tmp;
564 564 nmblks++;
565 565 if (--nsegs) {
566 566 nmp = tmp;
567 567 }
568 568 } while (nsegs);
569 569 nmp = tmp;
570 570 }
571 571 }
572 572
573 573 /*
574 574 * Hardware limits the transmit gather pointers to 15.
575 575 */
576 576 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
577 577 TX_MAX_GATHER_POINTERS) {
578 578 HXGE_DEBUG_MSG((NULL, TX_CTL,
579 579 "==> hxge_tx_pkt_nmblocks: pull msg - "
580 580 "len %d pkt_len %d nmblks %d",
581 581 len, pkt_len, nmblks));
582 582 /* Pull all message blocks from b_cont */
583 583 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
584 584 return (0);
585 585 }
586 586 freemsg(nmp->b_cont);
587 587 nmp->b_cont = tmp;
588 588 pkt_len = 0;
589 589 }
590 590 bmp = nmp;
591 591 nmp = nmp->b_cont;
592 592 }
593 593
594 594 HXGE_DEBUG_MSG((NULL, TX_CTL,
595 595 "<== hxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
596 596 "nmblks %d len %d tot_xfer_len %d",
597 597 mp->b_rptr, mp->b_wptr, nmblks, MBLKL(mp), *tot_xfer_len_p));
598 598 return (nmblks);
599 599 }
600 600
601 601 boolean_t
602 602 hxge_txdma_reclaim(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, int nmblks)
603 603 {
604 604 boolean_t status = B_TRUE;
605 605 p_hxge_dma_common_t tx_desc_dma_p;
606 606 hxge_dma_common_t desc_area;
607 607 p_tx_desc_t tx_desc_ring_vp;
608 608 p_tx_desc_t tx_desc_p;
609 609 p_tx_desc_t tx_desc_pp;
610 610 tx_desc_t r_tx_desc;
611 611 p_tx_msg_t tx_msg_ring;
612 612 p_tx_msg_t tx_msg_p;
613 613 hpi_handle_t handle;
614 614 tdc_tdr_head_t tx_head;
615 615 uint32_t pkt_len;
616 616 uint_t tx_rd_index;
617 617 uint16_t head_index, tail_index;
618 618 uint8_t tdc;
619 619 boolean_t head_wrap, tail_wrap;
620 620 p_hxge_tx_ring_stats_t tdc_stats;
621 621 tdc_byte_cnt_t byte_cnt;
622 622 tdc_tdr_qlen_t qlen;
623 623 int rc;
624 624
625 625 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_reclaim"));
626 626
627 627 status = ((tx_ring_p->descs_pending < hxge_reclaim_pending) &&
628 628 (nmblks != 0));
629 629 HXGE_DEBUG_MSG((hxgep, TX_CTL,
630 630 "==> hxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
631 631 tx_ring_p->descs_pending, hxge_reclaim_pending, nmblks));
632 632
633 633 if (!status) {
634 634 tx_desc_dma_p = &tx_ring_p->tdc_desc;
635 635 desc_area = tx_ring_p->tdc_desc;
636 636 tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
637 637 tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
638 638 tx_rd_index = tx_ring_p->rd_index;
639 639 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
640 640 tx_msg_ring = tx_ring_p->tx_msg_ring;
641 641 tx_msg_p = &tx_msg_ring[tx_rd_index];
642 642 tdc = tx_ring_p->tdc;
643 643 tdc_stats = tx_ring_p->tdc_stats;
644 644 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
645 645 tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
646 646 }
647 647 tail_index = tx_ring_p->wr_index;
648 648 tail_wrap = tx_ring_p->wr_index_wrap;
649 649
650 650 /*
651 651 * tdc_byte_cnt reg can be used to get bytes transmitted. It
652 652 * includes padding too in case of runt packets.
653 653 */
654 654 handle = HXGE_DEV_HPI_HANDLE(hxgep);
655 655 TXDMA_REG_READ64(handle, TDC_BYTE_CNT, tdc, &byte_cnt.value);
656 656 tdc_stats->obytes_with_pad += byte_cnt.bits.byte_count;
657 657
658 658 HXGE_DEBUG_MSG((hxgep, TX_CTL,
659 659 "==> hxge_txdma_reclaim: tdc %d tx_rd_index %d "
660 660 "tail_index %d tail_wrap %d tx_desc_p $%p ($%p) ",
661 661 tdc, tx_rd_index, tail_index, tail_wrap,
662 662 tx_desc_p, (*(uint64_t *)tx_desc_p)));
663 663
664 664 /*
665 665 * Read the hardware maintained transmit head and wrap around
666 666 * bit.
667 667 */
668 668 TXDMA_REG_READ64(handle, TDC_TDR_HEAD, tdc, &tx_head.value);
669 669 head_index = tx_head.bits.head;
670 670 head_wrap = tx_head.bits.wrap;
671 671 HXGE_DEBUG_MSG((hxgep, TX_CTL,
672 672 "==> hxge_txdma_reclaim: "
673 673 "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
674 674 tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));
675 675
676 676 /*
677 677 * For debug only. This can be used to verify the qlen and make
678 678 * sure the hardware is wrapping the Tdr correctly.
679 679 */
680 680 TXDMA_REG_READ64(handle, TDC_TDR_QLEN, tdc, &qlen.value);
681 681 HXGE_DEBUG_MSG((hxgep, TX_CTL,
682 682 "==> hxge_txdma_reclaim: tdr_qlen %d tdr_pref_qlen %d",
683 683 qlen.bits.tdr_qlen, qlen.bits.tdr_pref_qlen));
684 684
685 685 if (head_index == tail_index) {
686 686 if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index,
687 687 tail_wrap) && (head_index == tx_rd_index)) {
688 688 HXGE_DEBUG_MSG((hxgep, TX_CTL,
689 689 "==> hxge_txdma_reclaim: EMPTY"));
690 690 return (B_TRUE);
691 691 }
692 692 HXGE_DEBUG_MSG((hxgep, TX_CTL,
693 693 "==> hxge_txdma_reclaim: Checking if ring full"));
694 694 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
695 695 tail_wrap)) {
696 696 HXGE_DEBUG_MSG((hxgep, TX_CTL,
697 697 "==> hxge_txdma_reclaim: full"));
698 698 return (B_FALSE);
699 699 }
700 700 }
701 701 HXGE_DEBUG_MSG((hxgep, TX_CTL,
702 702 "==> hxge_txdma_reclaim: tx_rd_index and head_index"));
703 703
704 704 /* XXXX: limit the # of reclaims */
705 705 tx_desc_pp = &r_tx_desc;
706 706 while ((tx_rd_index != head_index) &&
707 707 (tx_ring_p->descs_pending != 0)) {
708 708 HXGE_DEBUG_MSG((hxgep, TX_CTL,
709 709 "==> hxge_txdma_reclaim: Checking if pending"));
710 710 HXGE_DEBUG_MSG((hxgep, TX_CTL,
711 711 "==> hxge_txdma_reclaim: descs_pending %d ",
712 712 tx_ring_p->descs_pending));
713 713 HXGE_DEBUG_MSG((hxgep, TX_CTL,
714 714 "==> hxge_txdma_reclaim: "
715 715 "(tx_rd_index %d head_index %d (tx_desc_p $%p)",
716 716 tx_rd_index, head_index, tx_desc_p));
717 717
718 718 tx_desc_pp->value = tx_desc_p->value;
719 719 HXGE_DEBUG_MSG((hxgep, TX_CTL,
720 720 "==> hxge_txdma_reclaim: "
721 721 "(tx_rd_index %d head_index %d "
722 722 "tx_desc_p $%p (desc value 0x%llx) ",
723 723 tx_rd_index, head_index,
724 724 tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
725 725 HXGE_DEBUG_MSG((hxgep, TX_CTL,
726 726 "==> hxge_txdma_reclaim: dump desc:"));
727 727
728 728 /*
729 729 * tdc_byte_cnt reg can be used to get bytes
730 730 * transmitted
731 731 */
732 732 pkt_len = tx_desc_pp->bits.tr_len;
733 733 tdc_stats->obytes += pkt_len;
734 734 tdc_stats->opackets += tx_desc_pp->bits.sop;
735 735 HXGE_DEBUG_MSG((hxgep, TX_CTL,
736 736 "==> hxge_txdma_reclaim: pkt_len %d "
737 737 "tdc channel %d opackets %d",
738 738 pkt_len, tdc, tdc_stats->opackets));
739 739
740 740 if (tx_msg_p->flags.dma_type == USE_DVMA) {
741 741 HXGE_DEBUG_MSG((hxgep, TX_CTL,
742 742 "tx_desc_p = $%p tx_desc_pp = $%p "
743 743 "index = %d",
744 744 tx_desc_p, tx_desc_pp,
745 745 tx_ring_p->rd_index));
746 746 (void) dvma_unload(tx_msg_p->dvma_handle,
747 747 0, -1);
748 748 tx_msg_p->dvma_handle = NULL;
749 749 if (tx_ring_p->dvma_wr_index ==
750 750 tx_ring_p->dvma_wrap_mask) {
751 751 tx_ring_p->dvma_wr_index = 0;
752 752 } else {
753 753 tx_ring_p->dvma_wr_index++;
754 754 }
755 755 tx_ring_p->dvma_pending--;
756 756 } else if (tx_msg_p->flags.dma_type == USE_DMA) {
757 757 HXGE_DEBUG_MSG((hxgep, TX_CTL,
758 758 "==> hxge_txdma_reclaim: USE DMA"));
759 759 if (rc = ddi_dma_unbind_handle
760 760 (tx_msg_p->dma_handle)) {
761 761 cmn_err(CE_WARN, "hxge_reclaim: "
762 762 "ddi_dma_unbind_handle "
763 763 "failed. status %d", rc);
764 764 }
765 765 }
766 766
767 767 HXGE_DEBUG_MSG((hxgep, TX_CTL,
768 768 "==> hxge_txdma_reclaim: count packets"));
769 769
770 770 /*
771 771 * count a chained packet only once.
772 772 */
773 773 if (tx_msg_p->tx_message != NULL) {
774 774 freemsg(tx_msg_p->tx_message);
775 775 tx_msg_p->tx_message = NULL;
776 776 }
777 777 tx_msg_p->flags.dma_type = USE_NONE;
778 778 tx_rd_index = tx_ring_p->rd_index;
779 779 tx_rd_index = (tx_rd_index + 1) &
780 780 tx_ring_p->tx_wrap_mask;
781 781 tx_ring_p->rd_index = tx_rd_index;
782 782 tx_ring_p->descs_pending--;
783 783 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
784 784 tx_msg_p = &tx_msg_ring[tx_rd_index];
785 785 }
786 786
787 787 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
788 788 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
789 789 if (status) {
790 - (void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
790 + (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
791 + 1, 0);
791 792 }
792 793 } else {
793 794 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
794 795 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
795 796 }
796 797
797 798 HXGE_DEBUG_MSG((hxgep, TX_CTL,
798 799 "<== hxge_txdma_reclaim status = 0x%08x", status));
799 800 return (status);
800 801 }
801 802
802 803 uint_t
803 804 hxge_tx_intr(caddr_t arg1, caddr_t arg2)
804 805 {
805 806 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
806 807 p_hxge_t hxgep = (p_hxge_t)arg2;
807 808 p_hxge_ldg_t ldgp;
808 809 uint8_t channel;
809 810 uint32_t vindex;
810 811 hpi_handle_t handle;
811 812 tdc_stat_t cs;
812 813 p_tx_ring_t *tx_rings;
813 814 p_tx_ring_t tx_ring_p;
814 815 hpi_status_t rs = HPI_SUCCESS;
815 816 uint_t serviced = DDI_INTR_UNCLAIMED;
816 817 hxge_status_t status = HXGE_OK;
817 818
818 819 if (ldvp == NULL) {
819 820 HXGE_DEBUG_MSG((NULL, INT_CTL,
820 821 "<== hxge_tx_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
821 822 return (DDI_INTR_UNCLAIMED);
822 823 }
823 824
824 825 if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
825 826 hxgep = ldvp->hxgep;
826 827 }
827 828
828 829 /*
829 830 * If the interface is not started, just swallow the interrupt
830 831 * and don't rearm the logical device.
831 832 */
832 833 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED)
833 834 return (DDI_INTR_CLAIMED);
834 835
835 836 HXGE_DEBUG_MSG((hxgep, INT_CTL,
836 837 "==> hxge_tx_intr: hxgep(arg2) $%p ldvp(arg1) $%p", hxgep, ldvp));
837 838
838 839 /*
839 840 * This interrupt handler is for a specific transmit dma channel.
840 841 */
841 842 handle = HXGE_DEV_HPI_HANDLE(hxgep);
842 843
843 844 /* Get the control and status for this channel. */
844 845 channel = ldvp->channel;
845 846 ldgp = ldvp->ldgp;
846 847 HXGE_DEBUG_MSG((hxgep, INT_CTL,
847 848 "==> hxge_tx_intr: hxgep $%p ldvp (ldvp) $%p channel %d",
848 849 hxgep, ldvp, channel));
849 850
850 851 rs = hpi_txdma_control_status(handle, OP_GET, channel, &cs);
851 852 vindex = ldvp->vdma_index;
852 853 HXGE_DEBUG_MSG((hxgep, INT_CTL,
853 854 "==> hxge_tx_intr:channel %d ring index %d status 0x%08x",
854 855 channel, vindex, rs));
855 856
856 857 if (!rs && cs.bits.marked) {
857 858 HXGE_DEBUG_MSG((hxgep, INT_CTL,
858 859 "==> hxge_tx_intr:channel %d ring index %d "
859 860 "status 0x%08x (marked bit set)", channel, vindex, rs));
860 861 tx_rings = hxgep->tx_rings->rings;
861 862 tx_ring_p = tx_rings[vindex];
862 863 HXGE_DEBUG_MSG((hxgep, INT_CTL,
863 864 "==> hxge_tx_intr:channel %d ring index %d "
864 865 "status 0x%08x (marked bit set, calling reclaim)",
865 866 channel, vindex, rs));
866 867
867 868 MUTEX_ENTER(&tx_ring_p->lock);
868 869 (void) hxge_txdma_reclaim(hxgep, tx_rings[vindex], 0);
869 870 MUTEX_EXIT(&tx_ring_p->lock);
870 871 mac_tx_update(hxgep->mach);
871 872 }
872 873
873 874 /*
874 875 * Process other transmit control and status. Check the ldv state.
875 876 */
876 877 status = hxge_tx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
877 878
878 879 /* Clear the error bits */
879 880 RXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs.value);
880 881
881 882 /*
882 883 * Rearm this logical group if this is a single device group.
883 884 */
884 885 if (ldgp->nldvs == 1) {
885 886 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_tx_intr: rearm"));
886 887 if (status == HXGE_OK) {
887 888 (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
888 889 B_TRUE, ldgp->ldg_timer);
889 890 }
890 891 }
891 892 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_tx_intr"));
892 893 serviced = DDI_INTR_CLAIMED;
893 894 return (serviced);
894 895 }
895 896
896 897 void
897 898 hxge_txdma_stop(p_hxge_t hxgep)
898 899 {
899 900 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop"));
900 901
901 902 (void) hxge_tx_vmac_disable(hxgep);
902 903 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
903 904
904 905 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop"));
905 906 }
906 907
907 908 hxge_status_t
908 909 hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
909 910 {
910 911 int i, ndmas;
911 912 uint16_t channel;
912 913 p_tx_rings_t tx_rings;
913 914 p_tx_ring_t *tx_desc_rings;
914 915 hpi_handle_t handle;
915 916 hpi_status_t rs = HPI_SUCCESS;
916 917 hxge_status_t status = HXGE_OK;
917 918
918 919 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
919 920 "==> hxge_txdma_hw_mode: enable mode %d", enable));
920 921
921 922 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
922 923 HXGE_DEBUG_MSG((hxgep, TX_CTL,
923 924 "<== hxge_txdma_mode: not initialized"));
924 925 return (HXGE_ERROR);
925 926 }
926 927 tx_rings = hxgep->tx_rings;
927 928 if (tx_rings == NULL) {
928 929 HXGE_DEBUG_MSG((hxgep, TX_CTL,
929 930 "<== hxge_txdma_hw_mode: NULL global ring pointer"));
930 931 return (HXGE_ERROR);
931 932 }
932 933 tx_desc_rings = tx_rings->rings;
933 934 if (tx_desc_rings == NULL) {
934 935 HXGE_DEBUG_MSG((hxgep, TX_CTL,
935 936 "<== hxge_txdma_hw_mode: NULL rings pointer"));
936 937 return (HXGE_ERROR);
937 938 }
938 939 ndmas = tx_rings->ndmas;
939 940 if (!ndmas) {
940 941 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
941 942 "<== hxge_txdma_hw_mode: no dma channel allocated"));
942 943 return (HXGE_ERROR);
943 944 }
944 945 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_mode: "
945 946 "tx_rings $%p tx_desc_rings $%p ndmas %d",
946 947 tx_rings, tx_desc_rings, ndmas));
947 948
948 949 handle = HXGE_DEV_HPI_HANDLE(hxgep);
949 950 for (i = 0; i < ndmas; i++) {
950 951 if (tx_desc_rings[i] == NULL) {
951 952 continue;
952 953 }
953 954 channel = tx_desc_rings[i]->tdc;
954 955 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
955 956 "==> hxge_txdma_hw_mode: channel %d", channel));
956 957 if (enable) {
957 958 rs = hpi_txdma_channel_enable(handle, channel);
958 959 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
959 960 "==> hxge_txdma_hw_mode: channel %d (enable) "
960 961 "rs 0x%x", channel, rs));
961 962 } else {
962 963 /*
963 964 			 * Stop the dma channel and wait for the stop done.  If
964 965 * the stop done bit is not set, then force an error so
965 966 * TXC will stop. All channels bound to this port need
966 967 * to be stopped and reset after injecting an interrupt
967 968 * error.
968 969 */
969 970 rs = hpi_txdma_channel_disable(handle, channel);
970 971 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
971 972 "==> hxge_txdma_hw_mode: channel %d (disable) "
972 973 "rs 0x%x", channel, rs));
973 974 }
974 975 }
975 976
976 977 status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
977 978
978 979 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
979 980 "<== hxge_txdma_hw_mode: status 0x%x", status));
980 981
981 982 return (status);
982 983 }
983 984
984 985 void
985 986 hxge_txdma_enable_channel(p_hxge_t hxgep, uint16_t channel)
986 987 {
987 988 hpi_handle_t handle;
988 989
989 990 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
990 991 "==> hxge_txdma_enable_channel: channel %d", channel));
991 992
992 993 handle = HXGE_DEV_HPI_HANDLE(hxgep);
993 994 /* enable the transmit dma channels */
994 995 (void) hpi_txdma_channel_enable(handle, channel);
995 996
996 997 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_txdma_enable_channel"));
997 998 }
998 999
999 1000 void
1000 1001 hxge_txdma_disable_channel(p_hxge_t hxgep, uint16_t channel)
1001 1002 {
1002 1003 hpi_handle_t handle;
1003 1004
1004 1005 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1005 1006 "==> hxge_txdma_disable_channel: channel %d", channel));
1006 1007
1007 1008 handle = HXGE_DEV_HPI_HANDLE(hxgep);
1008 1009 /* stop the transmit dma channels */
1009 1010 (void) hpi_txdma_channel_disable(handle, channel);
1010 1011
1011 1012 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_disable_channel"));
1012 1013 }
1013 1014
1014 1015 int
1015 1016 hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel)
1016 1017 {
1017 1018 hpi_handle_t handle;
1018 1019 int status;
1019 1020 hpi_status_t rs = HPI_SUCCESS;
1020 1021
1021 1022 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop_inj_err"));
1022 1023
1023 1024 /*
1024 1025 	 * Stop the dma channel and wait for the stop done.  If the stop done bit
1025 1026 * is not set, then create an error.
1026 1027 */
1027 1028 handle = HXGE_DEV_HPI_HANDLE(hxgep);
1028 1029 rs = hpi_txdma_channel_disable(handle, channel);
1029 1030 status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
1030 1031 if (status == HXGE_OK) {
1031 1032 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1032 1033 "<== hxge_txdma_stop_inj_err (channel %d): "
1033 1034 "stopped OK", channel));
1034 1035 return (status);
1035 1036 }
1036 1037
1037 1038 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1038 1039 	    "==> hxge_txdma_stop_inj_err (channel %d): stop failed (0x%x)"
1039 1040 " (injected error but still not stopped)", channel, rs));
1040 1041
1041 1042 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop_inj_err"));
1042 1043
1043 1044 return (status);
1044 1045 }
1045 1046
1046 1047 /*ARGSUSED*/
1047 1048 void
1048 1049 hxge_fixup_txdma_rings(p_hxge_t hxgep)
1049 1050 {
1050 1051 int index, ndmas;
1051 1052 uint16_t channel;
1052 1053 p_tx_rings_t tx_rings;
1053 1054
1054 1055 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_txdma_rings"));
1055 1056
1056 1057 /*
1057 1058 * For each transmit channel, reclaim each descriptor and free buffers.
1058 1059 */
1059 1060 tx_rings = hxgep->tx_rings;
1060 1061 if (tx_rings == NULL) {
1061 1062 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1062 1063 "<== hxge_fixup_txdma_rings: NULL ring pointer"));
1063 1064 return;
1064 1065 }
1065 1066
1066 1067 ndmas = tx_rings->ndmas;
1067 1068 if (!ndmas) {
1068 1069 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1069 1070 "<== hxge_fixup_txdma_rings: no channel allocated"));
1070 1071 return;
1071 1072 }
1072 1073
1073 1074 if (tx_rings->rings == NULL) {
1074 1075 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1075 1076 "<== hxge_fixup_txdma_rings: NULL rings pointer"));
1076 1077 return;
1077 1078 }
1078 1079
1079 1080 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_fixup_txdma_rings: "
1080 1081 "tx_rings $%p tx_desc_rings $%p ndmas %d",
1081 1082 tx_rings, tx_rings->rings, ndmas));
1082 1083
1083 1084 for (index = 0; index < ndmas; index++) {
1084 1085 channel = tx_rings->rings[index]->tdc;
1085 1086 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1086 1087 "==> hxge_fixup_txdma_rings: channel %d", channel));
1087 1088 hxge_txdma_fixup_channel(hxgep, tx_rings->rings[index],
1088 1089 channel);
1089 1090 }
1090 1091
1091 1092 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_txdma_rings"));
1092 1093 }
1093 1094
1094 1095 /*ARGSUSED*/
1095 1096 void
1096 1097 hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel)
1097 1098 {
1098 1099 p_tx_ring_t ring_p;
1099 1100
1100 1101 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_channel"));
1101 1102
1102 1103 ring_p = hxge_txdma_get_ring(hxgep, channel);
1103 1104 if (ring_p == NULL) {
1104 1105 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
1105 1106 return;
1106 1107 }
1107 1108
1108 1109 if (ring_p->tdc != channel) {
1109 1110 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1110 1111 "<== hxge_txdma_fix_channel: channel not matched "
1111 1112 		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
1112 1113 return;
1113 1114 }
1114 1115
1115 1116 hxge_txdma_fixup_channel(hxgep, ring_p, channel);
1116 1117
1117 1118 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
1118 1119 }
1119 1120
1120 1121 /*ARGSUSED*/
1121 1122 void
1122 1123 hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
1123 1124 {
1124 1125 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_channel"));
1125 1126
1126 1127 if (ring_p == NULL) {
1127 1128 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1128 1129 "<== hxge_txdma_fixup_channel: NULL ring pointer"));
1129 1130 return;
1130 1131 }
1131 1132 if (ring_p->tdc != channel) {
1132 1133 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1133 1134 "<== hxge_txdma_fixup_channel: channel not matched "
1134 1135 		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
1135 1136 return;
1136 1137 }
1137 1138 MUTEX_ENTER(&ring_p->lock);
1138 1139 (void) hxge_txdma_reclaim(hxgep, ring_p, 0);
1139 1140
1140 1141 ring_p->rd_index = 0;
1141 1142 ring_p->wr_index = 0;
1142 1143 ring_p->ring_head.value = 0;
1143 1144 ring_p->ring_kick_tail.value = 0;
1144 1145 ring_p->descs_pending = 0;
1145 1146 MUTEX_EXIT(&ring_p->lock);
1146 1147
1147 1148 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_channel"));
1148 1149 }
1149 1150
1150 1151 /*ARGSUSED*/
1151 1152 void
1152 1153 hxge_txdma_hw_kick(p_hxge_t hxgep)
1153 1154 {
1154 1155 int index, ndmas;
1155 1156 uint16_t channel;
1156 1157 p_tx_rings_t tx_rings;
1157 1158
1158 1159 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick"));
1159 1160
1160 1161 tx_rings = hxgep->tx_rings;
1161 1162 if (tx_rings == NULL) {
1162 1163 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1163 1164 "<== hxge_txdma_hw_kick: NULL ring pointer"));
1164 1165 return;
1165 1166 }
1166 1167 ndmas = tx_rings->ndmas;
1167 1168 if (!ndmas) {
1168 1169 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1169 1170 "<== hxge_txdma_hw_kick: no channel allocated"));
1170 1171 return;
1171 1172 }
1172 1173 if (tx_rings->rings == NULL) {
1173 1174 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1174 1175 "<== hxge_txdma_hw_kick: NULL rings pointer"));
1175 1176 return;
1176 1177 }
1177 1178 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_kick: "
1178 1179 "tx_rings $%p tx_desc_rings $%p ndmas %d",
1179 1180 tx_rings, tx_rings->rings, ndmas));
1180 1181
1181 1182 for (index = 0; index < ndmas; index++) {
1182 1183 channel = tx_rings->rings[index]->tdc;
1183 1184 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1184 1185 "==> hxge_txdma_hw_kick: channel %d", channel));
1185 1186 hxge_txdma_hw_kick_channel(hxgep, tx_rings->rings[index],
1186 1187 channel);
1187 1188 }
1188 1189
1189 1190 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick"));
1190 1191 }
1191 1192
1192 1193 /*ARGSUSED*/
1193 1194 void
1194 1195 hxge_txdma_kick_channel(p_hxge_t hxgep, uint16_t channel)
1195 1196 {
1196 1197 p_tx_ring_t ring_p;
1197 1198
1198 1199 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_kick_channel"));
1199 1200
1200 1201 ring_p = hxge_txdma_get_ring(hxgep, channel);
1201 1202 if (ring_p == NULL) {
1202 1203 		HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
1203 1204 return;
1204 1205 }
1205 1206
1206 1207 if (ring_p->tdc != channel) {
1207 1208 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1208 1209 "<== hxge_txdma_kick_channel: channel not matched "
1209 1210 		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
1210 1211 return;
1211 1212 }
1212 1213
1213 1214 hxge_txdma_hw_kick_channel(hxgep, ring_p, channel);
1214 1215
1215 1216 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
1216 1217 }
1217 1218
1218 1219 /*ARGSUSED*/
1219 1220 void
1220 1221 hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
1221 1222 {
1222 1223 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick_channel"));
1223 1224
1224 1225 if (ring_p == NULL) {
1225 1226 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1226 1227 "<== hxge_txdma_hw_kick_channel: NULL ring pointer"));
1227 1228 return;
1228 1229 }
1229 1230
1230 1231 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick_channel"));
1231 1232 }
1232 1233
1233 1234 /*ARGSUSED*/
1234 1235 void
1235 1236 hxge_check_tx_hang(p_hxge_t hxgep)
1236 1237 {
1237 1238 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_check_tx_hang"));
1238 1239
1239 1240 /*
1240 1241 	 * Needs inputs from hardware registers: the head index has not moved
1241 1242 	 * since the last timeout, and packets were not transmitted or are stuck.
1242 1243 */
1243 1244 if (hxge_txdma_hung(hxgep)) {
1244 1245 hxge_fixup_hung_txdma_rings(hxgep);
1245 1246 }
1246 1247
1247 1248 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_check_tx_hang"));
1248 1249 }
1249 1250
1250 1251 int
1251 1252 hxge_txdma_hung(p_hxge_t hxgep)
1252 1253 {
1253 1254 int index, ndmas;
1254 1255 uint16_t channel;
1255 1256 p_tx_rings_t tx_rings;
1256 1257 p_tx_ring_t tx_ring_p;
1257 1258
1258 1259 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hung"));
1259 1260
1260 1261 tx_rings = hxgep->tx_rings;
1261 1262 if (tx_rings == NULL) {
1262 1263 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1263 1264 "<== hxge_txdma_hung: NULL ring pointer"));
1264 1265 return (B_FALSE);
1265 1266 }
1266 1267
1267 1268 ndmas = tx_rings->ndmas;
1268 1269 if (!ndmas) {
1269 1270 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1270 1271 "<== hxge_txdma_hung: no channel allocated"));
1271 1272 return (B_FALSE);
1272 1273 }
1273 1274
1274 1275 if (tx_rings->rings == NULL) {
1275 1276 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1276 1277 "<== hxge_txdma_hung: NULL rings pointer"));
1277 1278 return (B_FALSE);
1278 1279 }
1279 1280
1280 1281 for (index = 0; index < ndmas; index++) {
1281 1282 channel = tx_rings->rings[index]->tdc;
1282 1283 tx_ring_p = tx_rings->rings[index];
1283 1284 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1284 1285 "==> hxge_txdma_hung: channel %d", channel));
1285 1286 if (hxge_txdma_channel_hung(hxgep, tx_ring_p, channel)) {
1286 1287 return (B_TRUE);
1287 1288 }
1288 1289 }
1289 1290
1290 1291 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hung"));
1291 1292
1292 1293 return (B_FALSE);
1293 1294 }
1294 1295
1295 1296 int
1296 1297 hxge_txdma_channel_hung(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1297 1298 {
1298 1299 uint16_t head_index, tail_index;
1299 1300 boolean_t head_wrap, tail_wrap;
1300 1301 hpi_handle_t handle;
1301 1302 tdc_tdr_head_t tx_head;
1302 1303 uint_t tx_rd_index;
1303 1304
1304 1305 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung"));
1305 1306
1306 1307 handle = HXGE_DEV_HPI_HANDLE(hxgep);
1307 1308 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1308 1309 "==> hxge_txdma_channel_hung: channel %d", channel));
1309 1310 MUTEX_ENTER(&tx_ring_p->lock);
1310 1311 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
1311 1312
1312 1313 tail_index = tx_ring_p->wr_index;
1313 1314 tail_wrap = tx_ring_p->wr_index_wrap;
1314 1315 tx_rd_index = tx_ring_p->rd_index;
1315 1316 MUTEX_EXIT(&tx_ring_p->lock);
1316 1317
1317 1318 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1318 1319 "==> hxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1319 1320 "tail_index %d tail_wrap %d ",
1320 1321 channel, tx_rd_index, tail_index, tail_wrap));
1321 1322 /*
1322 1323 * Read the hardware maintained transmit head and wrap around bit.
1323 1324 */
1324 1325 (void) hpi_txdma_ring_head_get(handle, channel, &tx_head);
1325 1326 head_index = tx_head.bits.head;
1326 1327 head_wrap = tx_head.bits.wrap;
1327 1328 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung: "
1328 1329 "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
1329 1330 tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));
1330 1331
1331 1332 if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, tail_wrap) &&
1332 1333 (head_index == tx_rd_index)) {
1333 1334 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1334 1335 "==> hxge_txdma_channel_hung: EMPTY"));
1335 1336 return (B_FALSE);
1336 1337 }
1337 1338 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1338 1339 "==> hxge_txdma_channel_hung: Checking if ring full"));
1339 1340 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, tail_wrap)) {
1340 1341 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1341 1342 "==> hxge_txdma_channel_hung: full"));
1342 1343 return (B_TRUE);
1343 1344 }
1344 1345
1345 1346 /* If not full, check with hardware to see if it is hung */
1346 1347 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_channel_hung"));
1347 1348
1348 1349 return (B_FALSE);
1349 1350 }
1350 1351
1351 1352 /*ARGSUSED*/
1352 1353 void
1353 1354 hxge_fixup_hung_txdma_rings(p_hxge_t hxgep)
1354 1355 {
1355 1356 int index, ndmas;
1356 1357 uint16_t channel;
1357 1358 p_tx_rings_t tx_rings;
1358 1359
1359 1360 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings"));
1360 1361 tx_rings = hxgep->tx_rings;
1361 1362 if (tx_rings == NULL) {
1362 1363 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1363 1364 "<== hxge_fixup_hung_txdma_rings: NULL ring pointer"));
1364 1365 return;
1365 1366 }
1366 1367 ndmas = tx_rings->ndmas;
1367 1368 if (!ndmas) {
1368 1369 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1369 1370 "<== hxge_fixup_hung_txdma_rings: no channel allocated"));
1370 1371 return;
1371 1372 }
1372 1373 if (tx_rings->rings == NULL) {
1373 1374 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1374 1375 "<== hxge_fixup_hung_txdma_rings: NULL rings pointer"));
1375 1376 return;
1376 1377 }
1377 1378 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings: "
1378 1379 "tx_rings $%p tx_desc_rings $%p ndmas %d",
1379 1380 tx_rings, tx_rings->rings, ndmas));
1380 1381
1381 1382 for (index = 0; index < ndmas; index++) {
1382 1383 channel = tx_rings->rings[index]->tdc;
1383 1384 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1384 1385 "==> hxge_fixup_hung_txdma_rings: channel %d", channel));
1385 1386 hxge_txdma_fixup_hung_channel(hxgep, tx_rings->rings[index],
1386 1387 channel);
1387 1388 }
1388 1389
1389 1390 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_hung_txdma_rings"));
1390 1391 }
1391 1392
1392 1393 /*ARGSUSED*/
1393 1394 void
1394 1395 hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel)
1395 1396 {
1396 1397 p_tx_ring_t ring_p;
1397 1398
1398 1399 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_hung_channel"));
1399 1400 ring_p = hxge_txdma_get_ring(hxgep, channel);
1400 1401 if (ring_p == NULL) {
1401 1402 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1402 1403 "<== hxge_txdma_fix_hung_channel"));
1403 1404 return;
1404 1405 }
1405 1406 if (ring_p->tdc != channel) {
1406 1407 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1407 1408 "<== hxge_txdma_fix_hung_channel: channel not matched "
1408 1409 		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
1409 1410 return;
1410 1411 }
1411 1412 hxge_txdma_fixup_channel(hxgep, ring_p, channel);
1412 1413
1413 1414 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_hung_channel"));
1414 1415 }
1415 1416
1416 1417 /*ARGSUSED*/
1417 1418 void
1418 1419 hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
1419 1420 uint16_t channel)
1420 1421 {
1421 1422 hpi_handle_t handle;
1422 1423 int status = HXGE_OK;
1423 1424
1424 1425 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_hung_channel"));
1425 1426
1426 1427 if (ring_p == NULL) {
1427 1428 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1428 1429 "<== hxge_txdma_fixup_hung_channel: NULL ring pointer"));
1429 1430 return;
1430 1431 }
1431 1432 if (ring_p->tdc != channel) {
1432 1433 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1433 1434 "<== hxge_txdma_fixup_hung_channel: channel "
1434 1435 		    "not matched ring tdc %d passed channel %d",
1435 1436 ring_p->tdc, channel));
1436 1437 return;
1437 1438 }
1438 1439 /* Reclaim descriptors */
1439 1440 MUTEX_ENTER(&ring_p->lock);
1440 1441 (void) hxge_txdma_reclaim(hxgep, ring_p, 0);
1441 1442 MUTEX_EXIT(&ring_p->lock);
1442 1443
1443 1444 handle = HXGE_DEV_HPI_HANDLE(hxgep);
1444 1445 /*
1445 1446 	 * Stop the dma channel and wait for the stop done.  If the stop done bit
1446 1447 * is not set, then force an error.
1447 1448 */
1448 1449 status = hpi_txdma_channel_disable(handle, channel);
1449 1450 if (!(status & HPI_TXDMA_STOP_FAILED)) {
1450 1451 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1451 1452 "<== hxge_txdma_fixup_hung_channel: stopped OK "
1452 1453 "ring tdc %d passed channel %d", ring_p->tdc, channel));
1453 1454 return;
1454 1455 }
1455 1456 /* Stop done bit will be set as a result of error injection */
1456 1457 status = hpi_txdma_channel_disable(handle, channel);
1457 1458 if (!(status & HPI_TXDMA_STOP_FAILED)) {
1458 1459 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1459 1460 		    "<== hxge_txdma_fixup_hung_channel: stopped again "
1460 1461 		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
1461 1462 return;
1462 1463 }
1463 1464
1464 1465 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1465 1466 "<== hxge_txdma_fixup_hung_channel: stop done still not set!! "
1466 1467 	    "ring tdc %d passed channel %d", ring_p->tdc, channel));
1467 1468 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_hung_channel"));
1468 1469 }
1469 1470
1470 1471 /*ARGSUSED*/
1471 1472 void
1472 1473 hxge_reclaim_rings(p_hxge_t hxgep)
1473 1474 {
1474 1475 int index, ndmas;
1475 1476 uint16_t channel;
1476 1477 p_tx_rings_t tx_rings;
1477 1478 p_tx_ring_t tx_ring_p;
1478 1479
1479 1480 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings"));
1480 1481 tx_rings = hxgep->tx_rings;
1481 1482 if (tx_rings == NULL) {
1482 1483 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1483 1484 		    "<== hxge_reclaim_rings: NULL ring pointer"));
1484 1485 return;
1485 1486 }
1486 1487 ndmas = tx_rings->ndmas;
1487 1488 if (!ndmas) {
1488 1489 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1489 1490 		    "<== hxge_reclaim_rings: no channel allocated"));
1490 1491 return;
1491 1492 }
1492 1493 if (tx_rings->rings == NULL) {
1493 1494 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1494 1495 		    "<== hxge_reclaim_rings: NULL rings pointer"));
1495 1496 return;
1496 1497 }
1497 1498 	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings: "
1498 1499 "tx_rings $%p tx_desc_rings $%p ndmas %d",
1499 1500 tx_rings, tx_rings->rings, ndmas));
1500 1501
1501 1502 for (index = 0; index < ndmas; index++) {
1502 1503 channel = tx_rings->rings[index]->tdc;
1503 1504 		HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings: channel %d",
1504 1505 channel));
1505 1506 tx_ring_p = tx_rings->rings[index];
1506 1507 MUTEX_ENTER(&tx_ring_p->lock);
1507 1508 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, channel);
1508 1509 MUTEX_EXIT(&tx_ring_p->lock);
1509 1510 }
1510 1511
1511 1512 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_reclaim_rings"));
1512 1513 }
1513 1514
1514 1515 /*
1515 1516 * Static functions start here.
1516 1517 */
1517 1518 static hxge_status_t
1518 1519 hxge_map_txdma(p_hxge_t hxgep)
1519 1520 {
1520 1521 int i, ndmas;
1521 1522 uint16_t channel;
1522 1523 p_tx_rings_t tx_rings;
1523 1524 p_tx_ring_t *tx_desc_rings;
1524 1525 p_tx_mbox_areas_t tx_mbox_areas_p;
1525 1526 p_tx_mbox_t *tx_mbox_p;
1526 1527 p_hxge_dma_pool_t dma_buf_poolp;
1527 1528 p_hxge_dma_pool_t dma_cntl_poolp;
1528 1529 p_hxge_dma_common_t *dma_buf_p;
1529 1530 p_hxge_dma_common_t *dma_cntl_p;
1530 1531 hxge_status_t status = HXGE_OK;
1531 1532
1532 1533 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma"));
1533 1534
1534 1535 dma_buf_poolp = hxgep->tx_buf_pool_p;
1535 1536 dma_cntl_poolp = hxgep->tx_cntl_pool_p;
1536 1537
1537 1538 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1538 1539 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1539 1540 "==> hxge_map_txdma: buf not allocated"));
1540 1541 return (HXGE_ERROR);
1541 1542 }
1542 1543 ndmas = dma_buf_poolp->ndmas;
1543 1544 if (!ndmas) {
1544 1545 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1545 1546 "<== hxge_map_txdma: no dma allocated"));
1546 1547 return (HXGE_ERROR);
1547 1548 }
1548 1549 dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
1549 1550 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
1550 1551
1551 1552 tx_rings = (p_tx_rings_t)KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
1552 1553 tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
1553 1554 sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);
1554 1555
1555 1556 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
1556 1557 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
1557 1558
1558 1559 tx_mbox_areas_p = (p_tx_mbox_areas_t)
1559 1560 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
1560 1561 tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
1561 1562 sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);
1562 1563
1563 1564 /*
1564 1565 * Map descriptors from the buffer pools for each dma channel.
1565 1566 */
1566 1567 for (i = 0; i < ndmas; i++) {
1567 1568 /*
1568 1569 * Set up and prepare buffer blocks, descriptors and mailbox.
1569 1570 */
1570 1571 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
1571 1572 status = hxge_map_txdma_channel(hxgep, channel,
1572 1573 (p_hxge_dma_common_t *)&dma_buf_p[i],
1573 1574 (p_tx_ring_t *)&tx_desc_rings[i],
1574 1575 dma_buf_poolp->num_chunks[i],
1575 1576 (p_hxge_dma_common_t *)&dma_cntl_p[i],
1576 1577 (p_tx_mbox_t *)&tx_mbox_p[i]);
1577 1578 if (status != HXGE_OK) {
1578 1579 goto hxge_map_txdma_fail1;
1579 1580 }
1580 1581 tx_desc_rings[i]->index = (uint16_t)i;
1581 1582 tx_desc_rings[i]->tdc_stats = &hxgep->statsp->tdc_stats[i];
1582 1583 }
1583 1584
1584 1585 tx_rings->ndmas = ndmas;
1585 1586 tx_rings->rings = tx_desc_rings;
1586 1587 hxgep->tx_rings = tx_rings;
1587 1588 tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
1588 1589 hxgep->tx_mbox_areas_p = tx_mbox_areas_p;
1589 1590
1590 1591 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
1591 1592 "tx_rings $%p rings $%p", hxgep->tx_rings, hxgep->tx_rings->rings));
1592 1593 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
1593 1594 "tx_rings $%p tx_desc_rings $%p",
1594 1595 hxgep->tx_rings, tx_desc_rings));
1595 1596
1596 1597 goto hxge_map_txdma_exit;
1597 1598
1598 1599 hxge_map_txdma_fail1:
1599 1600 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1600 1601 "==> hxge_map_txdma: uninit tx desc "
1601 1602 	    "(status 0x%x channel %d i %d)", status, channel, i));
1602 1603 i--;
1603 1604 for (; i >= 0; i--) {
1604 1605 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
1605 1606 hxge_unmap_txdma_channel(hxgep, channel, tx_desc_rings[i],
1606 1607 tx_mbox_p[i]);
1607 1608 }
1608 1609
1609 1610 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
1610 1611 KMEM_FREE(tx_rings, sizeof (tx_rings_t));
1611 1612 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
1612 1613 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
1613 1614
1614 1615 hxge_map_txdma_exit:
1615 1616 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1616 1617 "==> hxge_map_txdma: (status 0x%x channel %d)", status, channel));
1617 1618
1618 1619 return (status);
1619 1620 }
1620 1621
1621 1622 static void
1622 1623 hxge_unmap_txdma(p_hxge_t hxgep)
1623 1624 {
1624 1625 int i, ndmas;
1625 1626 uint8_t channel;
1626 1627 p_tx_rings_t tx_rings;
1627 1628 p_tx_ring_t *tx_desc_rings;
1628 1629 p_tx_mbox_areas_t tx_mbox_areas_p;
1629 1630 p_tx_mbox_t *tx_mbox_p;
1630 1631 p_hxge_dma_pool_t dma_buf_poolp;
1631 1632
1632 1633 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma"));
1633 1634
1634 1635 dma_buf_poolp = hxgep->tx_buf_pool_p;
1635 1636 if (!dma_buf_poolp->buf_allocated) {
1636 1637 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1637 1638 "==> hxge_unmap_txdma: buf not allocated"));
1638 1639 return;
1639 1640 }
1640 1641 ndmas = dma_buf_poolp->ndmas;
1641 1642 if (!ndmas) {
1642 1643 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1643 1644 "<== hxge_unmap_txdma: no dma allocated"));
1644 1645 return;
1645 1646 }
1646 1647 	tx_rings = hxgep->tx_rings;
1648 1649 	if (tx_rings == NULL) {
1649 1650 		HXGE_DEBUG_MSG((hxgep, TX_CTL,
1650 1651 		    "<== hxge_unmap_txdma: NULL ring pointer"));
1651 1652 		return;
1652 1653 	}
1653 1654 	tx_desc_rings = tx_rings->rings;
1654 1655 if (tx_desc_rings == NULL) {
1655 1656 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1656 1657 "<== hxge_unmap_txdma: NULL ring pointers"));
1657 1658 return;
1658 1659 }
1659 1660 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma: "
1660 1661 "tx_rings $%p tx_desc_rings $%p ndmas %d",
1661 1662 tx_rings, tx_desc_rings, ndmas));
1662 1663
1663 1664 tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
1664 1665 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
1665 1666
1666 1667 for (i = 0; i < ndmas; i++) {
1667 1668 channel = tx_desc_rings[i]->tdc;
1668 1669 (void) hxge_unmap_txdma_channel(hxgep, channel,
1669 1670 (p_tx_ring_t)tx_desc_rings[i],
1670 1671 (p_tx_mbox_t)tx_mbox_p[i]);
1671 1672 }
1672 1673
1673 1674 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
1674 1675 KMEM_FREE(tx_rings, sizeof (tx_rings_t));
1675 1676 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
1676 1677 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
1677 1678
1678 1679 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma"));
1679 1680 }
1680 1681
1681 1682 static hxge_status_t
1682 1683 hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
1683 1684 p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
1684 1685 uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
1685 1686 p_tx_mbox_t *tx_mbox_p)
1686 1687 {
1687 1688 	hxge_status_t status = HXGE_OK;
1688 1689
1689 1690 /*
1690 1691 * Set up and prepare buffer blocks, descriptors and mailbox.
1691 1692 */
1692 1693 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1693 1694 "==> hxge_map_txdma_channel (channel %d)", channel));
1694 1695
1695 1696 /*
1696 1697 * Transmit buffer blocks
1697 1698 */
1698 1699 status = hxge_map_txdma_channel_buf_ring(hxgep, channel,
1699 1700 dma_buf_p, tx_desc_p, num_chunks);
1700 1701 if (status != HXGE_OK) {
1701 1702 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1702 1703 "==> hxge_map_txdma_channel (channel %d): "
1703 1704 "map buffer failed 0x%x", channel, status));
1704 1705 goto hxge_map_txdma_channel_exit;
1705 1706 }
1706 1707 /*
1707 1708 * Transmit block ring, and mailbox.
1708 1709 */
1709 1710 hxge_map_txdma_channel_cfg_ring(hxgep, channel, dma_cntl_p, *tx_desc_p,
1710 1711 tx_mbox_p);
1711 1712
1712 1713 goto hxge_map_txdma_channel_exit;
1713 1714
1714 1715 hxge_map_txdma_channel_fail1:
1715 1716 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1716 1717 "==> hxge_map_txdma_channel: unmap buf"
1717 1718 "(status 0x%x channel %d)", status, channel));
1718 1719 hxge_unmap_txdma_channel_buf_ring(hxgep, *tx_desc_p);
1719 1720
1720 1721 hxge_map_txdma_channel_exit:
1721 1722 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1722 1723 "<== hxge_map_txdma_channel: (status 0x%x channel %d)",
1723 1724 status, channel));
1724 1725
1725 1726 return (status);
1726 1727 }
1727 1728
1728 1729 /*ARGSUSED*/
1729 1730 static void
1730 1731 hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
1731 1732 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
1732 1733 {
1733 1734 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1734 1735 "==> hxge_unmap_txdma_channel (channel %d)", channel));
1735 1736
1736 1737 /* unmap tx block ring, and mailbox. */
1737 1738 (void) hxge_unmap_txdma_channel_cfg_ring(hxgep, tx_ring_p, tx_mbox_p);
1738 1739
1739 1740 /* unmap buffer blocks */
1740 1741 (void) hxge_unmap_txdma_channel_buf_ring(hxgep, tx_ring_p);
1741 1742
1742 1743 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma_channel"));
1743 1744 }
1744 1745
1745 1746 /*ARGSUSED*/
1746 1747 static void
1747 1748 hxge_map_txdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
1748 1749 p_hxge_dma_common_t *dma_cntl_p, p_tx_ring_t tx_ring_p,
1749 1750 p_tx_mbox_t *tx_mbox_p)
1750 1751 {
1751 1752 p_tx_mbox_t mboxp;
1752 1753 p_hxge_dma_common_t cntl_dmap;
1753 1754 p_hxge_dma_common_t dmap;
1754 1755 tdc_tdr_cfg_t *tx_ring_cfig_p;
1755 1756 tdc_tdr_kick_t *tx_ring_kick_p;
1756 1757 tdc_tdr_cfg_t *tx_cs_p;
1757 1758 tdc_int_mask_t *tx_evmask_p;
1758 1759 tdc_mbh_t *mboxh_p;
1759 1760 tdc_mbl_t *mboxl_p;
1760 1761 uint64_t tx_desc_len;
1761 1762
1762 1763 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1763 1764 "==> hxge_map_txdma_channel_cfg_ring"));
1764 1765
1765 1766 cntl_dmap = *dma_cntl_p;
1766 1767
1767 1768 dmap = (p_hxge_dma_common_t)&tx_ring_p->tdc_desc;
1768 1769 hxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
1769 1770 sizeof (tx_desc_t));
1770 1771
1771 1772 /*
1772 1773 * Zero out transmit ring descriptors.
1773 1774 */
1774 1775 bzero((caddr_t)dmap->kaddrp, dmap->alength);
1775 1776 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
1776 1777 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
1777 1778 tx_cs_p = &(tx_ring_p->tx_cs);
1778 1779 tx_evmask_p = &(tx_ring_p->tx_evmask);
1779 1780 tx_ring_cfig_p->value = 0;
1780 1781 tx_ring_kick_p->value = 0;
1781 1782 tx_cs_p->value = 0;
1782 1783 tx_evmask_p->value = 0;
1783 1784
1784 1785 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1785 1786 "==> hxge_map_txdma_channel_cfg_ring: channel %d des $%p",
1786 1787 dma_channel, dmap->dma_cookie.dmac_laddress));
1787 1788
1788 1789 tx_ring_cfig_p->value = 0;
1789 1790
1790 1791 	/* The Hydra len field is 11 bits; the ring size's low 5 bits are implied 0s */
1791 1792 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 5);
1792 1793 tx_ring_cfig_p->value =
1793 1794 (dmap->dma_cookie.dmac_laddress & TDC_TDR_CFG_ADDR_MASK) |
1794 1795 (tx_desc_len << TDC_TDR_CFG_LEN_SHIFT);
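	/*
	 * For example (the numbers here are illustrative, not driver
	 * defaults): a ring of 1024 descriptors yields a length field of
	 * 1024 >> 5 = 32, i.e. the hardware is told the ring size in
	 * units of 32 descriptors.
	 */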
1795 1796
1796 1797 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1797 1798 "==> hxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
1798 1799 dma_channel, tx_ring_cfig_p->value));
1799 1800
1800 1801 tx_cs_p->bits.reset = 1;
1801 1802
1802 1803 /* Map in mailbox */
1803 1804 mboxp = (p_tx_mbox_t)KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
1804 1805 dmap = (p_hxge_dma_common_t)&mboxp->tx_mbox;
1805 1806 hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
1806 1807 mboxh_p = (tdc_mbh_t *)&tx_ring_p->tx_mbox_mbh;
1807 1808 mboxl_p = (tdc_mbl_t *)&tx_ring_p->tx_mbox_mbl;
1808 1809 mboxh_p->value = mboxl_p->value = 0;
1809 1810
1810 1811 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1811 1812 "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
1812 1813 dmap->dma_cookie.dmac_laddress));
1813 1814
1814 1815 mboxh_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
1815 1816 TDC_MBH_ADDR_SHIFT) & TDC_MBH_MASK);
1816 1817 mboxl_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress &
1817 1818 TDC_MBL_MASK) >> TDC_MBL_SHIFT);
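	/*
	 * The 64-bit mailbox cookie is split across two registers: MBH
	 * takes the address bits above TDC_MBH_ADDR_SHIFT and MBL the bits
	 * selected by TDC_MBL_MASK (the exact field widths live in the
	 * register definitions), so both halves must be programmed before
	 * the mailbox is usable.
	 */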
1818 1819
1819 1820 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1820 1821 "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
1821 1822 dmap->dma_cookie.dmac_laddress));
1822 1823 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1823 1824 "==> hxge_map_txdma_channel_cfg_ring: hmbox $%p mbox $%p",
1824 1825 mboxh_p->bits.mbaddr, mboxl_p->bits.mbaddr));
1825 1826
1826 1827 /*
1827 1828 * Set page valid and no mask
1828 1829 */
1829 1830 tx_ring_p->page_hdl.value = 0;
1830 1831
1831 1832 *tx_mbox_p = mboxp;
1832 1833
1833 1834 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1834 1835 "<== hxge_map_txdma_channel_cfg_ring"));
1835 1836 }
1836 1837
1837 1838 /*ARGSUSED*/
1838 1839 static void
1839 1840 hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
1840 1841 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
1841 1842 {
1842 1843 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1843 1844 "==> hxge_unmap_txdma_channel_cfg_ring: channel %d",
1844 1845 tx_ring_p->tdc));
1845 1846
1846 1847 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
1847 1848
1848 1849 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1849 1850 "<== hxge_unmap_txdma_channel_cfg_ring"));
1850 1851 }
1851 1852
1852 1853 static hxge_status_t
1853 1854 hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
1854 1855 p_hxge_dma_common_t *dma_buf_p,
1855 1856 p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
1856 1857 {
1857 1858 p_hxge_dma_common_t dma_bufp, tmp_bufp;
1858 1859 p_hxge_dma_common_t dmap;
1859 1860 hxge_os_dma_handle_t tx_buf_dma_handle;
1860 1861 p_tx_ring_t tx_ring_p;
1861 1862 p_tx_msg_t tx_msg_ring;
1862 1863 hxge_status_t status = HXGE_OK;
1863 1864 int ddi_status = DDI_SUCCESS;
1864 1865 int i, j, index;
1865 1866 uint32_t size, bsize;
1866 1867 uint32_t nblocks, nmsgs;
1867 1868 char qname[TASKQ_NAMELEN];
1868 1869
1869 1870 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1870 1871 "==> hxge_map_txdma_channel_buf_ring"));
1871 1872
1872 1873 dma_bufp = tmp_bufp = *dma_buf_p;
1873 1874 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1874 1875 " hxge_map_txdma_channel_buf_ring: channel %d to map %d "
1875 1876 "chunks bufp $%p", channel, num_chunks, dma_bufp));
1876 1877
1877 1878 nmsgs = 0;
1878 1879 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
1879 1880 nmsgs += tmp_bufp->nblocks;
1880 1881 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1881 1882 "==> hxge_map_txdma_channel_buf_ring: channel %d "
1882 1883 "bufp $%p nblocks %d nmsgs %d",
1883 1884 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
1884 1885 }
1885 1886 if (!nmsgs) {
1886 1887 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1887 1888 "<== hxge_map_txdma_channel_buf_ring: channel %d "
1888 1889 "no msg blocks", channel));
1889 1890 status = HXGE_ERROR;
1890 1891
1891 1892 goto hxge_map_txdma_channel_buf_ring_exit;
1892 1893 }
1893 1894
1894 1895 tx_ring_p = (p_tx_ring_t)KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
1895 1896 tx_ring_p->hxgep = hxgep;
1896 1897 (void) snprintf(qname, TASKQ_NAMELEN, "hxge_%d_%d",
1897 1898 hxgep->instance, channel);
1898 1899 tx_ring_p->taskq = ddi_taskq_create(hxgep->dip, qname, 1,
1899 1900 TASKQ_DEFAULTPRI, 0);
1900 1901 	if (tx_ring_p->taskq == NULL) {
		/* Nothing else has been allocated yet; free and bail. */
		KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
		status = HXGE_ERROR;
1901 1902 		goto hxge_map_txdma_channel_buf_ring_exit;
1902 1903 	}
1903 1904
1904 1905 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
1905 1906 (void *) hxgep->interrupt_cookie);
1906 1907 /*
1907 1908 * Allocate transmit message rings and handles for packets not to be
1908 1909 * copied to premapped buffers.
1909 1910 */
1910 1911 size = nmsgs * sizeof (tx_msg_t);
1911 1912 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
1912 1913 for (i = 0; i < nmsgs; i++) {
1913 1914 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1914 1915 DDI_DMA_DONTWAIT, 0, &tx_msg_ring[i].dma_handle);
1915 1916 if (ddi_status != DDI_SUCCESS) {
1916 1917 status |= HXGE_DDI_FAILED;
1917 1918 break;
1918 1919 }
1919 1920 }
1920 1921
1921 1922 if (i < nmsgs) {
1922 1923 HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL,
1923 1924 "Allocate handles failed."));
1924 1925
1925 1926 goto hxge_map_txdma_channel_buf_ring_fail1;
1926 1927 }
1927 1928 tx_ring_p->tdc = channel;
1928 1929 tx_ring_p->tx_msg_ring = tx_msg_ring;
1929 1930 tx_ring_p->tx_ring_size = nmsgs;
1930 1931 tx_ring_p->num_chunks = num_chunks;
1931 1932 if (!hxge_tx_intr_thres) {
1932 1933 hxge_tx_intr_thres = tx_ring_p->tx_ring_size / 4;
1933 1934 }
1934 1935 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
1935 1936 tx_ring_p->rd_index = 0;
1936 1937 tx_ring_p->wr_index = 0;
1937 1938 tx_ring_p->ring_head.value = 0;
1938 1939 tx_ring_p->ring_kick_tail.value = 0;
1939 1940 tx_ring_p->descs_pending = 0;
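	/*
	 * Note: the wrap mask assumes tx_ring_size is a power of two;
	 * e.g. a 1024-entry ring gives tx_wrap_mask = 0x3ff, so an index
	 * can be wrapped with (index + 1) & tx_wrap_mask instead of a
	 * modulo operation.
	 */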
1940 1941
1941 1942 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1942 1943 "==> hxge_map_txdma_channel_buf_ring: channel %d "
1943 1944 "actual tx desc max %d nmsgs %d (config hxge_tx_ring_size %d)",
1944 1945 channel, tx_ring_p->tx_ring_size, nmsgs, hxge_tx_ring_size));
1945 1946
1946 1947 /*
1947 1948 * Map in buffers from the buffer pool.
1948 1949 */
1949 1950 index = 0;
1950 1951 bsize = dma_bufp->block_size;
1951 1952
1952 1953 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma_channel_buf_ring: "
1953 1954 "dma_bufp $%p tx_rng_p $%p tx_msg_rng_p $%p bsize %d",
1954 1955 dma_bufp, tx_ring_p, tx_msg_ring, bsize));
1955 1956
1956 1957 for (i = 0; i < num_chunks; i++, dma_bufp++) {
1957 1958 bsize = dma_bufp->block_size;
1958 1959 nblocks = dma_bufp->nblocks;
1959 1960 tx_buf_dma_handle = dma_bufp->dma_handle;
1960 1961 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1961 1962 "==> hxge_map_txdma_channel_buf_ring: dma chunk %d "
1962 1963 "size %d dma_bufp $%p",
1963 1964 i, sizeof (hxge_dma_common_t), dma_bufp));
1964 1965
1965 1966 for (j = 0; j < nblocks; j++) {
1966 1967 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
1967 1968 tx_msg_ring[index].offset_index = j;
1968 1969 dmap = &tx_msg_ring[index++].buf_dma;
1969 1970 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1970 1971 			    "==> hxge_map_txdma_channel_buf_ring: j %d "
1971 1972 			    "dmap $%p", j, dmap));
1972 1973 hxge_setup_dma_common(dmap, dma_bufp, 1, bsize);
1973 1974 }
1974 1975 }
1975 1976
1976 1977 if (i < num_chunks) {
1977 1978 status = HXGE_ERROR;
1978 1979
1979 1980 goto hxge_map_txdma_channel_buf_ring_fail1;
1980 1981 }
1981 1982
1982 1983 *tx_desc_p = tx_ring_p;
1983 1984
1984 1985 goto hxge_map_txdma_channel_buf_ring_exit;
1985 1986
1986 1987 hxge_map_txdma_channel_buf_ring_fail1:
1987 1988 if (tx_ring_p->taskq) {
1988 1989 ddi_taskq_destroy(tx_ring_p->taskq);
1989 1990 tx_ring_p->taskq = NULL;
1990 1991 }
1991 1992
1993 1994 	for (index = 0; index < nmsgs; index++) {
1994 1995 		if (tx_msg_ring[index].dma_handle != NULL) {
1995 1996 			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
1996 1997 		}
1997 1998 	}
1998 1999 MUTEX_DESTROY(&tx_ring_p->lock);
1999 2000 KMEM_FREE(tx_msg_ring, size);
2000 2001 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2001 2002
2002 2003 status = HXGE_ERROR;
2003 2004
2004 2005 hxge_map_txdma_channel_buf_ring_exit:
2005 2006 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2006 2007 "<== hxge_map_txdma_channel_buf_ring status 0x%x", status));
2007 2008
2008 2009 return (status);
2009 2010 }
2010 2011
2011 2012 /*ARGSUSED*/
2012 2013 static void
2013 2014 hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, p_tx_ring_t tx_ring_p)
2014 2015 {
2015 2016 p_tx_msg_t tx_msg_ring;
2016 2017 p_tx_msg_t tx_msg_p;
2017 2018 int i;
2018 2019
2019 2020 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2020 2021 "==> hxge_unmap_txdma_channel_buf_ring"));
2021 2022 if (tx_ring_p == NULL) {
2022 2023 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2023 2024 "<== hxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2024 2025 return;
2025 2026 }
2026 2027 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2027 2028 "==> hxge_unmap_txdma_channel_buf_ring: channel %d",
2028 2029 tx_ring_p->tdc));
2029 2030
2030 2031 MUTEX_ENTER(&tx_ring_p->lock);
2031 2032 tx_msg_ring = tx_ring_p->tx_msg_ring;
2032 2033 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2033 2034 tx_msg_p = &tx_msg_ring[i];
2034 2035 if (tx_msg_p->flags.dma_type == USE_DVMA) {
2035 2036 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "entry = %d", i));
2036 2037 (void) dvma_unload(tx_msg_p->dvma_handle, 0, -1);
2037 2038 tx_msg_p->dvma_handle = NULL;
2038 2039 if (tx_ring_p->dvma_wr_index ==
2039 2040 tx_ring_p->dvma_wrap_mask) {
2040 2041 tx_ring_p->dvma_wr_index = 0;
2041 2042 } else {
2042 2043 tx_ring_p->dvma_wr_index++;
2043 2044 }
2044 2045 tx_ring_p->dvma_pending--;
2045 2046 } else if (tx_msg_p->flags.dma_type == USE_DMA) {
2046 2047 if (ddi_dma_unbind_handle(tx_msg_p->dma_handle)) {
2047 2048 				cmn_err(CE_WARN, "hxge_unmap_txdma_channel_buf_ring: "
2048 2049 "ddi_dma_unbind_handle failed.");
2049 2050 }
2050 2051 }
2051 2052 if (tx_msg_p->tx_message != NULL) {
2052 2053 freemsg(tx_msg_p->tx_message);
2053 2054 tx_msg_p->tx_message = NULL;
2054 2055 }
2055 2056 }
2056 2057
2057 2058 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2058 2059 if (tx_msg_ring[i].dma_handle != NULL) {
2059 2060 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2060 2061 }
2061 2062 }
2062 2063 MUTEX_EXIT(&tx_ring_p->lock);
2063 2064
2064 2065 if (tx_ring_p->taskq) {
2065 2066 ddi_taskq_destroy(tx_ring_p->taskq);
2066 2067 tx_ring_p->taskq = NULL;
2067 2068 }
2068 2069
2069 2070 MUTEX_DESTROY(&tx_ring_p->lock);
2070 2071 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2071 2072 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2072 2073
2073 2074 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2074 2075 "<== hxge_unmap_txdma_channel_buf_ring"));
2075 2076 }
2076 2077
2077 2078 static hxge_status_t
2078 2079 hxge_txdma_hw_start(p_hxge_t hxgep)
2079 2080 {
2080 2081 int i, ndmas;
2081 2082 uint16_t channel;
2082 2083 p_tx_rings_t tx_rings;
2083 2084 p_tx_ring_t *tx_desc_rings;
2084 2085 p_tx_mbox_areas_t tx_mbox_areas_p;
2085 2086 p_tx_mbox_t *tx_mbox_p;
2086 2087 hxge_status_t status = HXGE_OK;
2087 2088 uint64_t tmp;
2088 2089
2089 2090 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start"));
2090 2091
2091 2092 	/*
2092 2093 	 * Initialize the REORD table: 1. disable the VMAC; 2. reset the
2093 2094 	 * FIFO error status; 3. scrub the table memory and check for errors.
2094 2095 	 */
2095 2096 (void) hxge_tx_vmac_disable(hxgep);
2096 2097
2097 2098 /*
2098 2099 * Clear the error status
2099 2100 */
2100 2101 HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
2101 2102
2102 2103 /*
2103 2104 * Scrub the rtab memory for the TDC and reset the TDC.
2104 2105 */
2105 2106 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, 0x0ULL);
2106 2107 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, 0x0ULL);
2107 2108
2108 2109 for (i = 0; i < 256; i++) {
2109 2110 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
2110 2111 (uint64_t)i);
2111 2112
2112 2113 /*
2113 2114 * Write the command register with an indirect read instruction
2114 2115 */
2115 2116 tmp = (0x1ULL << 30) | i;
2116 2117 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
2117 2118
2118 2119 /*
2119 2120 * Wait for status done
2120 2121 */
2121 2122 tmp = 0;
2122 2123 do {
2123 2124 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
2124 2125 &tmp);
2125 2126 } while (((tmp >> 31) & 0x1ULL) == 0x0);
2126 2127 }
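	/*
	 * The loop above scrubs the table: with the data registers zeroed,
	 * writing the bare index issues a table write, and setting bit 30
	 * turns the command into a read-back of the same entry; bit 31 is
	 * polled as the done flag.
	 */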
2127 2128
2128 2129 for (i = 0; i < 256; i++) {
2129 2130 /*
2130 2131 * Write the command register with an indirect read instruction
2131 2132 */
2132 2133 tmp = (0x1ULL << 30) | i;
2133 2134 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
2134 2135
2135 2136 /*
2136 2137 * Wait for status done
2137 2138 */
2138 2139 tmp = 0;
2139 2140 do {
2140 2141 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
2141 2142 &tmp);
2142 2143 } while (((tmp >> 31) & 0x1ULL) == 0x0);
2143 2144
2144 2145 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, &tmp);
2145 2146 if (0x1ff00ULL != (0x1ffffULL & tmp)) {
2146 2147 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
2147 2148 "unexpected data (hi), entry: %x, value: 0x%0llx\n",
2148 2149 i, (unsigned long long)tmp));
2149 2150 status = HXGE_ERROR;
2150 2151 }
2151 2152
2152 2153 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, &tmp);
2153 2154 if (tmp != 0) {
2154 2155 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
2155 2156 "unexpected data (lo), entry: %x\n", i));
2156 2157 status = HXGE_ERROR;
2157 2158 }
2158 2159
2159 2160 HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp);
2160 2161 if (tmp != 0) {
2161 2162 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
2162 2163 "parity error, entry: %x, val 0x%llx\n",
2163 2164 i, (unsigned long long)tmp));
2164 2165 status = HXGE_ERROR;
2165 2166 }
2173 2174 }
2174 2175
2175 2176 if (status != HXGE_OK)
2176 2177 goto hxge_txdma_hw_start_exit;
2177 2178
2178 2179 /*
2179 2180 * Reset FIFO Error Status for the TDC and enable FIFO error events.
2180 2181 */
2181 2182 HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
2182 2183 HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_MASK, 0x0);
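	/*
	 * Writing 0x7 clears the three error status bits (write one to
	 * clear); a mask of 0 then leaves all FIFO error events enabled,
	 * since a set mask bit would suppress the corresponding event.
	 */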
2183 2184
2184 2185 /*
2185 2186 * Initialize the Transmit DMAs.
2186 2187 */
2187 2188 tx_rings = hxgep->tx_rings;
2188 2189 if (tx_rings == NULL) {
2189 2190 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2190 2191 "<== hxge_txdma_hw_start: NULL ring pointer"));
2191 2192 return (HXGE_ERROR);
2192 2193 }
2193 2194
2194 2195 tx_desc_rings = tx_rings->rings;
2195 2196 if (tx_desc_rings == NULL) {
2196 2197 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2197 2198 "<== hxge_txdma_hw_start: NULL ring pointers"));
2198 2199 return (HXGE_ERROR);
2199 2200 }
2200 2201 ndmas = tx_rings->ndmas;
2201 2202 if (!ndmas) {
2202 2203 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2203 2204 "<== hxge_txdma_hw_start: no dma channel allocated"));
2204 2205 return (HXGE_ERROR);
2205 2206 }
2206 2207 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start: "
2207 2208 "tx_rings $%p tx_desc_rings $%p ndmas %d",
2208 2209 tx_rings, tx_desc_rings, ndmas));
2209 2210
2210 2211 tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
2211 2212 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2212 2213
2213 2214 /*
2214 2215 * Init the DMAs.
2215 2216 */
2216 2217 for (i = 0; i < ndmas; i++) {
2217 2218 channel = tx_desc_rings[i]->tdc;
2218 2219 status = hxge_txdma_start_channel(hxgep, channel,
2219 2220 (p_tx_ring_t)tx_desc_rings[i],
2220 2221 (p_tx_mbox_t)tx_mbox_p[i]);
2221 2222 if (status != HXGE_OK) {
2222 2223 goto hxge_txdma_hw_start_fail1;
2223 2224 }
2224 2225 }
2225 2226
2226 2227 (void) hxge_tx_vmac_enable(hxgep);
2227 2228
2228 2229 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2229 2230 "==> hxge_txdma_hw_start: tx_rings $%p rings $%p",
2230 2231 hxgep->tx_rings, hxgep->tx_rings->rings));
2231 2232 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2232 2233 "==> hxge_txdma_hw_start: tx_rings $%p tx_desc_rings $%p",
2233 2234 hxgep->tx_rings, tx_desc_rings));
2234 2235
2235 2236 goto hxge_txdma_hw_start_exit;
2236 2237
2237 2238 hxge_txdma_hw_start_fail1:
2238 2239 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2239 2240 "==> hxge_txdma_hw_start: disable (status 0x%x channel %d i %d)",
2240 2241 status, channel, i));
2241 2242
2242 2243 for (; i >= 0; i--) {
2243 2244 		channel = tx_desc_rings[i]->tdc;
2244 2245 (void) hxge_txdma_stop_channel(hxgep, channel,
2245 2246 (p_tx_ring_t)tx_desc_rings[i],
2246 2247 (p_tx_mbox_t)tx_mbox_p[i]);
2247 2248 }
2248 2249
2249 2250 hxge_txdma_hw_start_exit:
2250 2251 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2251 2252 "==> hxge_txdma_hw_start: (status 0x%x)", status));
2252 2253
2253 2254 return (status);
2254 2255 }
2255 2256
2256 2257 static void
2257 2258 hxge_txdma_hw_stop(p_hxge_t hxgep)
2258 2259 {
2259 2260 int i, ndmas;
2260 2261 uint16_t channel;
2261 2262 p_tx_rings_t tx_rings;
2262 2263 p_tx_ring_t *tx_desc_rings;
2263 2264 p_tx_mbox_areas_t tx_mbox_areas_p;
2264 2265 p_tx_mbox_t *tx_mbox_p;
2265 2266
2266 2267 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop"));
2267 2268
2268 2269 tx_rings = hxgep->tx_rings;
2269 2270 if (tx_rings == NULL) {
2270 2271 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2271 2272 "<== hxge_txdma_hw_stop: NULL ring pointer"));
2272 2273 return;
2273 2274 }
2274 2275
2275 2276 tx_desc_rings = tx_rings->rings;
2276 2277 if (tx_desc_rings == NULL) {
2277 2278 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2278 2279 "<== hxge_txdma_hw_stop: NULL ring pointers"));
2279 2280 return;
2280 2281 }
2281 2282
2282 2283 ndmas = tx_rings->ndmas;
2283 2284 if (!ndmas) {
2284 2285 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2285 2286 "<== hxge_txdma_hw_stop: no dma channel allocated"));
2286 2287 return;
2287 2288 }
2288 2289
2289 2290 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
2290 2291 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2291 2292
2292 2293 tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
2293 2294 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2294 2295
2295 2296 for (i = 0; i < ndmas; i++) {
2296 2297 channel = tx_desc_rings[i]->tdc;
2297 2298 (void) hxge_txdma_stop_channel(hxgep, channel,
2298 2299 (p_tx_ring_t)tx_desc_rings[i],
2299 2300 (p_tx_mbox_t)tx_mbox_p[i]);
2300 2301 }
2301 2302
2302 2303 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
2303 2304 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2304 2305 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_hw_stop"));
2305 2306 }
2306 2307
2307 2308 static hxge_status_t
2308 2309 hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
2309 2310 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2310 2311 {
2311 2312 hxge_status_t status = HXGE_OK;
2312 2313
2313 2314 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2314 2315 "==> hxge_txdma_start_channel (channel %d)", channel));
2315 2316 /*
2316 2317 * TXDMA/TXC must be in stopped state.
2317 2318 */
2318 2319 (void) hxge_txdma_stop_inj_err(hxgep, channel);
2319 2320
2320 2321 /*
2321 2322 * Reset TXDMA channel
2322 2323 */
2323 2324 tx_ring_p->tx_cs.value = 0;
2324 2325 tx_ring_p->tx_cs.bits.reset = 1;
2325 2326 status = hxge_reset_txdma_channel(hxgep, channel,
2326 2327 tx_ring_p->tx_cs.value);
2327 2328 if (status != HXGE_OK) {
2328 2329 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2329 2330 "==> hxge_txdma_start_channel (channel %d)"
2330 2331 " reset channel failed 0x%x", channel, status));
2331 2332
2332 2333 goto hxge_txdma_start_channel_exit;
2333 2334 }
2334 2335
2335 2336 /*
2336 2337 * Initialize the TXDMA channel specific FZC control configurations.
2337 2338 * These FZC registers are pertaining to each TX channel (i.e. logical
2338 2339 * pages).
2339 2340 */
2340 2341 status = hxge_init_fzc_txdma_channel(hxgep, channel,
2341 2342 tx_ring_p, tx_mbox_p);
2342 2343 if (status != HXGE_OK) {
2343 2344 goto hxge_txdma_start_channel_exit;
2344 2345 }
2345 2346
2346 2347 /*
2347 2348 * Initialize the event masks.
2348 2349 */
2349 2350 tx_ring_p->tx_evmask.value = 0;
2350 2351 status = hxge_init_txdma_channel_event_mask(hxgep,
2351 2352 channel, &tx_ring_p->tx_evmask);
2352 2353 if (status != HXGE_OK) {
2353 2354 goto hxge_txdma_start_channel_exit;
2354 2355 }
2355 2356
2356 2357 /*
2357 2358 * Load TXDMA descriptors, buffers, mailbox, initialise the DMA
2358 2359 * channels and enable each DMA channel.
2359 2360 */
2360 2361 status = hxge_enable_txdma_channel(hxgep, channel,
2361 2362 tx_ring_p, tx_mbox_p);
2362 2363 if (status != HXGE_OK) {
2363 2364 goto hxge_txdma_start_channel_exit;
2364 2365 }
2365 2366
2366 2367 hxge_txdma_start_channel_exit:
2367 2368 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_start_channel"));
2368 2369
2369 2370 return (status);
2370 2371 }
2371 2372
2372 2373 /*ARGSUSED*/
2373 2374 static hxge_status_t
2374 2375 hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
2375 2376 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2376 2377 {
2377 2378 	hxge_status_t status = HXGE_OK;
2378 2379
2379 2380 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2380 2381 "==> hxge_txdma_stop_channel: channel %d", channel));
2381 2382
2382 2383 	/*
2383 2384 	 * Stop (disable) TXDMA and TXC. If the stop bit is set but STOP_N_GO
2384 2385 	 * is not, the TXDMA reset state will not be set when TXDMA is reset.
2385 2386 	 */
2386 2387 (void) hxge_txdma_stop_inj_err(hxgep, channel);
2387 2388
2388 2389 /*
2389 2390 * Reset TXDMA channel
2390 2391 */
2391 2392 tx_ring_p->tx_cs.value = 0;
2392 2393 tx_ring_p->tx_cs.bits.reset = 1;
2393 2394 status = hxge_reset_txdma_channel(hxgep, channel,
2394 2395 tx_ring_p->tx_cs.value);
2395 2396 if (status != HXGE_OK) {
2396 2397 goto hxge_txdma_stop_channel_exit;
2397 2398 }
2398 2399
2399 2400 hxge_txdma_stop_channel_exit:
2400 2401 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_stop_channel"));
2401 2402
2402 2403 return (status);
2403 2404 }
2404 2405
2405 2406 static p_tx_ring_t
2406 2407 hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel)
2407 2408 {
2408 2409 int index, ndmas;
2409 2410 uint16_t tdc;
2410 2411 p_tx_rings_t tx_rings;
2411 2412
2412 2413 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_ring"));
2413 2414
2414 2415 tx_rings = hxgep->tx_rings;
2415 2416 if (tx_rings == NULL) {
2416 2417 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2417 2418 "<== hxge_txdma_get_ring: NULL ring pointer"));
2418 2419 return (NULL);
2419 2420 }
2420 2421 ndmas = tx_rings->ndmas;
2421 2422 if (!ndmas) {
2422 2423 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2423 2424 "<== hxge_txdma_get_ring: no channel allocated"));
2424 2425 return (NULL);
2425 2426 }
2426 2427 if (tx_rings->rings == NULL) {
2427 2428 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2428 2429 "<== hxge_txdma_get_ring: NULL rings pointer"));
2429 2430 return (NULL);
2430 2431 }
2431 2432 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_ring: "
2432 2433 "tx_rings $%p tx_desc_rings $%p ndmas %d",
2433 2434 	    tx_rings, tx_rings->rings, ndmas));
2434 2435
2435 2436 for (index = 0; index < ndmas; index++) {
2436 2437 tdc = tx_rings->rings[index]->tdc;
2437 2438 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2438 2439 		    "==> hxge_txdma_get_ring: channel %d", tdc));
2439 2440 if (channel == tdc) {
2440 2441 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2441 2442 "<== hxge_txdma_get_ring: tdc %d ring $%p",
2442 2443 tdc, tx_rings->rings[index]));
2443 2444 return (p_tx_ring_t)(tx_rings->rings[index]);
2444 2445 }
2445 2446 }
2446 2447
2447 2448 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_ring"));
2448 2449
2449 2450 return (NULL);
2450 2451 }
2451 2452
2452 2453 static p_tx_mbox_t
2453 2454 hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel)
2454 2455 {
2455 2456 int index, tdc, ndmas;
2456 2457 p_tx_rings_t tx_rings;
2457 2458 p_tx_mbox_areas_t tx_mbox_areas_p;
2458 2459 p_tx_mbox_t *tx_mbox_p;
2459 2460
2460 2461 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_mbox"));
2461 2462
2462 2463 tx_rings = hxgep->tx_rings;
2463 2464 if (tx_rings == NULL) {
2464 2465 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2465 2466 "<== hxge_txdma_get_mbox: NULL ring pointer"));
2466 2467 return (NULL);
2467 2468 }
2468 2469 tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
2469 2470 if (tx_mbox_areas_p == NULL) {
2470 2471 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2471 2472 "<== hxge_txdma_get_mbox: NULL mbox pointer"));
2472 2473 return (NULL);
2473 2474 }
2474 2475 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2475 2476
2476 2477 ndmas = tx_rings->ndmas;
2477 2478 if (!ndmas) {
2478 2479 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2479 2480 "<== hxge_txdma_get_mbox: no channel allocated"));
2480 2481 return (NULL);
2481 2482 }
2482 2483 if (tx_rings->rings == NULL) {
2483 2484 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2484 2485 "<== hxge_txdma_get_mbox: NULL rings pointer"));
2485 2486 return (NULL);
2486 2487 }
2487 2488 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_mbox: "
2488 2489 "tx_rings $%p tx_desc_rings $%p ndmas %d",
2489 2490 	    tx_rings, tx_rings->rings, ndmas));
2490 2491
2491 2492 for (index = 0; index < ndmas; index++) {
2492 2493 tdc = tx_rings->rings[index]->tdc;
2493 2494 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2494 2495 "==> hxge_txdma_get_mbox: channel %d", tdc));
2495 2496 if (channel == tdc) {
2496 2497 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2497 2498 "<== hxge_txdma_get_mbox: tdc %d ring $%p",
2498 2499 tdc, tx_rings->rings[index]));
2499 2500 return (p_tx_mbox_t)(tx_mbox_p[index]);
2500 2501 }
2501 2502 }
2502 2503
2503 2504 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_mbox"));
2504 2505
2505 2506 return (NULL);
2506 2507 }
2507 2508
2508 2509 /*ARGSUSED*/
2509 2510 static hxge_status_t
2510 2511 hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
2511 2512 tdc_stat_t cs)
2512 2513 {
2513 2514 hpi_handle_t handle;
2514 2515 uint8_t channel;
2515 2516 p_tx_ring_t *tx_rings;
2516 2517 p_tx_ring_t tx_ring_p;
2517 2518 p_hxge_tx_ring_stats_t tdc_stats;
2518 2519 boolean_t txchan_fatal = B_FALSE;
2519 2520 hxge_status_t status = HXGE_OK;
2520 2521 tdc_drop_cnt_t drop_cnt;
2521 2522
2522 2523 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_tx_err_evnts"));
2523 2524 handle = HXGE_DEV_HPI_HANDLE(hxgep);
2524 2525 channel = ldvp->channel;
2525 2526
2526 2527 tx_rings = hxgep->tx_rings->rings;
2527 2528 tx_ring_p = tx_rings[index];
2528 2529 tdc_stats = tx_ring_p->tdc_stats;
2529 2530
2530 2531 /* Get the error counts if any */
2531 2532 TXDMA_REG_READ64(handle, TDC_DROP_CNT, channel, &drop_cnt.value);
2532 2533 tdc_stats->count_hdr_size_err += drop_cnt.bits.hdr_size_error_count;
2533 2534 tdc_stats->count_runt += drop_cnt.bits.runt_count;
2534 2535 tdc_stats->count_abort += drop_cnt.bits.abort_count;
2535 2536
2536 2537 if (cs.bits.peu_resp_err) {
2537 2538 tdc_stats->peu_resp_err++;
2538 2539 HXGE_FM_REPORT_ERROR(hxgep, channel,
2539 2540 HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR);
2540 2541 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2541 2542 "==> hxge_tx_err_evnts(channel %d): "
2542 2543 "fatal error: peu_resp_err", channel));
2543 2544 txchan_fatal = B_TRUE;
2544 2545 }
2545 2546
2546 2547 if (cs.bits.pkt_size_hdr_err) {
2547 2548 tdc_stats->pkt_size_hdr_err++;
2548 2549 HXGE_FM_REPORT_ERROR(hxgep, channel,
2549 2550 HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR);
2550 2551 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2551 2552 "==> hxge_tx_err_evnts(channel %d): "
2552 2553 "fatal error: pkt_size_hdr_err", channel));
2553 2554 txchan_fatal = B_TRUE;
2554 2555 }
2555 2556
2556 2557 if (cs.bits.runt_pkt_drop_err) {
2557 2558 tdc_stats->runt_pkt_drop_err++;
2558 2559 HXGE_FM_REPORT_ERROR(hxgep, channel,
2559 2560 HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR);
2560 2561 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2561 2562 "==> hxge_tx_err_evnts(channel %d): "
2562 2563 "fatal error: runt_pkt_drop_err", channel));
2563 2564 txchan_fatal = B_TRUE;
2564 2565 }
2565 2566
2566 2567 if (cs.bits.pkt_size_err) {
2567 2568 tdc_stats->pkt_size_err++;
2568 2569 HXGE_FM_REPORT_ERROR(hxgep, channel,
2569 2570 HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
2570 2571 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2571 2572 "==> hxge_tx_err_evnts(channel %d): "
2572 2573 "fatal error: pkt_size_err", channel));
2573 2574 txchan_fatal = B_TRUE;
2574 2575 }
2575 2576
2576 2577 if (cs.bits.tx_rng_oflow) {
2577 2578 tdc_stats->tx_rng_oflow++;
2578 2579 		if (tdc_stats->tx_rng_oflow == 1)
2579 2580 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2580 2581 "==> hxge_tx_err_evnts(channel %d): "
2581 2582 "fatal error: tx_rng_oflow", channel));
2582 2583 }
2583 2584
2584 2585 if (cs.bits.pref_par_err) {
2585 2586 tdc_stats->pref_par_err++;
2586 2587
2587 2588 /* Get the address of parity error read data */
2588 2589 TXDMA_REG_READ64(hxgep->hpi_handle, TDC_PREF_PAR_LOG,
2589 2590 channel, &tdc_stats->errlog.value);
2590 2591
2591 2592 HXGE_FM_REPORT_ERROR(hxgep, channel,
2592 2593 HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR);
2593 2594 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2594 2595 "==> hxge_tx_err_evnts(channel %d): "
2595 2596 "fatal error: pref_par_err", channel));
2596 2597 txchan_fatal = B_TRUE;
2597 2598 }
2598 2599
2599 2600 if (cs.bits.tdr_pref_cpl_to) {
2600 2601 tdc_stats->tdr_pref_cpl_to++;
2601 2602 HXGE_FM_REPORT_ERROR(hxgep, channel,
2602 2603 HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO);
2603 2604 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2604 2605 "==> hxge_tx_err_evnts(channel %d): "
2605 2606 "fatal error: tdr_pref_cpl_to", channel));
2606 2607 txchan_fatal = B_TRUE;
2607 2608 }
2608 2609
2609 2610 if (cs.bits.pkt_cpl_to) {
2610 2611 tdc_stats->pkt_cpl_to++;
2611 2612 HXGE_FM_REPORT_ERROR(hxgep, channel,
2612 2613 HXGE_FM_EREPORT_TDMC_PKT_CPL_TO);
2613 2614 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2614 2615 "==> hxge_tx_err_evnts(channel %d): "
2615 2616 "fatal error: pkt_cpl_to", channel));
2616 2617 txchan_fatal = B_TRUE;
2617 2618 }
2618 2619
2619 2620 if (cs.bits.invalid_sop) {
2620 2621 tdc_stats->invalid_sop++;
2621 2622 HXGE_FM_REPORT_ERROR(hxgep, channel,
2622 2623 HXGE_FM_EREPORT_TDMC_INVALID_SOP);
2623 2624 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2624 2625 "==> hxge_tx_err_evnts(channel %d): "
2625 2626 "fatal error: invalid_sop", channel));
2626 2627 txchan_fatal = B_TRUE;
2627 2628 }
2628 2629
2629 2630 if (cs.bits.unexpected_sop) {
2630 2631 tdc_stats->unexpected_sop++;
2631 2632 HXGE_FM_REPORT_ERROR(hxgep, channel,
2632 2633 HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP);
2633 2634 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2634 2635 "==> hxge_tx_err_evnts(channel %d): "
2635 2636 "fatal error: unexpected_sop", channel));
2636 2637 txchan_fatal = B_TRUE;
2637 2638 }
2638 2639
2639 2640 /* Clear error injection source in case this is an injected error */
2640 2641 TXDMA_REG_WRITE64(hxgep->hpi_handle, TDC_STAT_INT_DBG, channel, 0);
2641 2642
2642 2643 if (txchan_fatal) {
2643 2644 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2644 2645 " hxge_tx_err_evnts: "
2645 2646 " fatal error on channel %d cs 0x%llx\n",
2646 2647 channel, cs.value));
2647 2648 status = hxge_txdma_fatal_err_recover(hxgep, channel,
2648 2649 tx_ring_p);
2649 2650 if (status == HXGE_OK) {
2650 2651 FM_SERVICE_RESTORED(hxgep);
2651 2652 }
2652 2653 }
2653 2654
2654 2655 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_tx_err_evnts"));
2655 2656
2656 2657 return (status);
2657 2658 }
2658 2659
2659 2660 hxge_status_t
2660 2661 hxge_txdma_handle_sys_errors(p_hxge_t hxgep)
2661 2662 {
2662 2663 hpi_handle_t handle;
2663 2664 hxge_status_t status = HXGE_OK;
2664 2665 tdc_fifo_err_stat_t fifo_stat;
2665 2666 hxge_tdc_sys_stats_t *tdc_sys_stats;
2666 2667
2667 2668 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_handle_sys_errors"));
2668 2669
2669 2670 handle = HXGE_DEV_HPI_HANDLE(hxgep);
2670 2671
2671 2672 /*
2672 2673 * The FIFO is shared by all channels.
2673 2674 * Get the status of Reorder Buffer and Reorder Table Buffer Errors
2674 2675 */
2675 2676 HXGE_REG_RD64(handle, TDC_FIFO_ERR_STAT, &fifo_stat.value);
2676 2677
2677 2678 /*
2678 2679 * Clear the error bits. Note that writing a 1 clears the bit. Writing
2679 2680 * a 0 does nothing.
2680 2681 */
2681 2682 HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, fifo_stat.value);
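	/*
	 * For example, if fifo_stat.value reads back 0x5, writing 0x5
	 * clears exactly bits 0 and 2 and leaves every other bit alone.
	 */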
2682 2683
2683 2684 tdc_sys_stats = &hxgep->statsp->tdc_sys_stats;
2684 2685 if (fifo_stat.bits.reord_tbl_par_err) {
2685 2686 tdc_sys_stats->reord_tbl_par_err++;
2686 2687 HXGE_FM_REPORT_ERROR(hxgep, NULL,
2687 2688 HXGE_FM_EREPORT_TDMC_REORD_TBL_PAR);
2688 2689 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2689 2690 "==> hxge_txdma_handle_sys_errors: fatal error: "
2690 2691 "reord_tbl_par_err"));
2691 2692 }
2692 2693
2693 2694 if (fifo_stat.bits.reord_buf_ded_err) {
2694 2695 tdc_sys_stats->reord_buf_ded_err++;
2695 2696 HXGE_FM_REPORT_ERROR(hxgep, NULL,
2696 2697 HXGE_FM_EREPORT_TDMC_REORD_BUF_DED);
2697 2698 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2698 2699 "==> hxge_txdma_handle_sys_errors: "
2699 2700 "fatal error: reord_buf_ded_err"));
2700 2701 }
2701 2702
2702 2703 if (fifo_stat.bits.reord_buf_sec_err) {
2703 2704 tdc_sys_stats->reord_buf_sec_err++;
2704 2705 if (tdc_sys_stats->reord_buf_sec_err == 1)
2705 2706 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2706 2707 "==> hxge_txdma_handle_sys_errors: "
2707 2708 "reord_buf_sec_err"));
2708 2709 }
2709 2710
2710 2711 if (fifo_stat.bits.reord_tbl_par_err ||
2711 2712 fifo_stat.bits.reord_buf_ded_err) {
2712 2713 status = hxge_tx_port_fatal_err_recover(hxgep);
2713 2714 if (status == HXGE_OK) {
2714 2715 FM_SERVICE_RESTORED(hxgep);
2715 2716 }
2716 2717 }
2717 2718
2718 2719 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_handle_sys_errors"));
2719 2720
2720 2721 return (status);
2721 2722 }
2722 2723
2723 2724 static hxge_status_t
2724 2725 hxge_txdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel,
2725 2726 p_tx_ring_t tx_ring_p)
2726 2727 {
2727 2728 hpi_handle_t handle;
2728 2729 hpi_status_t rs = HPI_SUCCESS;
2729 2730 p_tx_mbox_t tx_mbox_p;
2730 2731 hxge_status_t status = HXGE_OK;
2731 2732
2732 2733 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_txdma_fatal_err_recover"));
2733 2734 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2734 2735 "Recovering from TxDMAChannel#%d error...", channel));
2735 2736
2736 2737 	/*
2737 2738 	 * Stop the DMA channel and wait for the stop-done bit. If the
2738 2739 	 * stop-done bit is not set, flag an error.
2739 2740 	 */
2740 2741 handle = HXGE_DEV_HPI_HANDLE(hxgep);
2741 2742 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping txdma channel(%d)",
2742 2743 channel));
2743 2744 MUTEX_ENTER(&tx_ring_p->lock);
2744 2745 rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
2745 2746 if (rs != HPI_SUCCESS) {
2746 2747 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2747 2748 "==> hxge_txdma_fatal_err_recover (channel %d): "
2748 2749 "stop failed ", channel));
2749 2750
2750 2751 goto fail;
2751 2752 }
2752 2753 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming txdma channel(%d)",
2753 2754 channel));
2754 2755 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
2755 2756
2756 2757 /*
2757 2758 * Reset TXDMA channel
2758 2759 */
2759 2760 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "resetting txdma channel(%d)",
2760 2761 channel));
2761 2762 if ((rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
2762 2763 HPI_SUCCESS) {
2763 2764 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2764 2765 "==> hxge_txdma_fatal_err_recover (channel %d)"
2765 2766 " reset channel failed 0x%x", channel, rs));
2766 2767
2767 2768 goto fail;
2768 2769 }
2769 2770 	/*
2770 2771 	 * Reset the tail (kick) register to 0. The hardware will not reset
2771 2772 	 * it; a Tx overflow fatal error results if the tail is not 0 after reset.
2772 2773 	 */
2773 2774 TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);
2774 2775
2775 2776 /*
2776 2777 * Restart TXDMA channel
2777 2778 *
2778 2779 * Initialize the TXDMA channel specific FZC control configurations.
2779 2780 * These FZC registers are pertaining to each TX channel (i.e. logical
2780 2781 * pages).
2781 2782 */
2782 2783 tx_mbox_p = hxge_txdma_get_mbox(hxgep, channel);
2783 2784 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "restarting txdma channel(%d)",
2784 2785 channel));
2785 2786 status = hxge_init_fzc_txdma_channel(hxgep, channel,
2786 2787 tx_ring_p, tx_mbox_p);
2787 2788 if (status != HXGE_OK)
2788 2789 goto fail;
2789 2790
2790 2791 /*
2791 2792 * Initialize the event masks.
2792 2793 */
2793 2794 tx_ring_p->tx_evmask.value = 0;
2794 2795 status = hxge_init_txdma_channel_event_mask(hxgep, channel,
2795 2796 &tx_ring_p->tx_evmask);
2796 2797 if (status != HXGE_OK)
2797 2798 goto fail;
2798 2799
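	/*
	 * The channel reset leaves the hardware head and tail at 0, so the
	 * software indexes and the wrap flag must be zeroed to match
	 * before the channel is re-enabled.
	 */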
2799 2800 tx_ring_p->wr_index_wrap = B_FALSE;
2800 2801 tx_ring_p->wr_index = 0;
2801 2802 tx_ring_p->rd_index = 0;
2802 2803
2803 2804 /*
2804 2805 * Load TXDMA descriptors, buffers, mailbox, initialise the DMA
2805 2806 * channels and enable each DMA channel.
2806 2807 */
2807 2808 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "enabling txdma channel(%d)",
2808 2809 channel));
2809 2810 status = hxge_enable_txdma_channel(hxgep, channel,
2810 2811 tx_ring_p, tx_mbox_p);
2811 2812 MUTEX_EXIT(&tx_ring_p->lock);
2812 2813 if (status != HXGE_OK)
2813 2814 goto fail;
2814 2815
2815 2816 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2816 2817 "Recovery Successful, TxDMAChannel#%d Restored", channel));
2817 2818 	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_txdma_fatal_err_recover"));
2818 2819
2819 2820 return (HXGE_OK);
2820 2821
2821 2822 fail:
2822 2823 MUTEX_EXIT(&tx_ring_p->lock);
2823 2824 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
2824 2825 "hxge_txdma_fatal_err_recover (channel %d): "
2825 2826 "failed to recover this txdma channel", channel));
2826 2827 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
2827 2828
2828 2829 return (status);
2829 2830 }
2830 2831
2831 2832 static hxge_status_t
2832 2833 hxge_tx_port_fatal_err_recover(p_hxge_t hxgep)
2833 2834 {
2834 2835 hpi_handle_t handle;
2835 2836 hpi_status_t rs = HPI_SUCCESS;
2836 2837 hxge_status_t status = HXGE_OK;
2837 2838 p_tx_ring_t *tx_desc_rings;
2838 2839 p_tx_rings_t tx_rings;
2839 2840 p_tx_ring_t tx_ring_p;
2840 2841 int i, ndmas;
2841 2842 uint16_t channel;
2842 2843 block_reset_t reset_reg;
2843 2844
2844 2845 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
2845 2846 "==> hxge_tx_port_fatal_err_recover"));
2846 2847 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2847 2848 "Recovering from TxPort error..."));
2848 2849
2849 2850 handle = HXGE_DEV_HPI_HANDLE(hxgep);
2850 2851
2851 2852 /* Reset TDC block from PEU for this fatal error */
2852 2853 reset_reg.value = 0;
2853 2854 reset_reg.bits.tdc_rst = 1;
2854 2855 HXGE_REG_WR32(handle, BLOCK_RESET, reset_reg.value);
2855 2856
2856 2857 HXGE_DELAY(1000);
2857 2858
2858 2859 	/*
2859 2860 	 * Stop each DMA channel and wait for the stop-done bit. If the
2860 2861 	 * stop-done bit is not set, flag an error.
2861 2862 	 */
2862 2863 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping all DMA channels..."));
2863 2864
2864 2865 tx_rings = hxgep->tx_rings;
2865 2866 tx_desc_rings = tx_rings->rings;
2866 2867 ndmas = tx_rings->ndmas;
2867 2868
2868 2869 for (i = 0; i < ndmas; i++) {
2869 2870 if (tx_desc_rings[i] == NULL) {
2870 2871 continue;
2871 2872 }
2872 2873 tx_ring_p = tx_rings->rings[i];
2873 2874 MUTEX_ENTER(&tx_ring_p->lock);
2874 2875 }
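	/*
	 * Every ring lock is acquired before any channel is stopped so the
	 * whole TDC is quiesced for the duration of the recovery; the
	 * locks are dropped only after the restart (or in the fail path).
	 */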
2875 2876
2876 2877 for (i = 0; i < ndmas; i++) {
2877 2878 if (tx_desc_rings[i] == NULL) {
2878 2879 continue;
2879 2880 }
2880 2881 channel = tx_desc_rings[i]->tdc;
2881 2882 tx_ring_p = tx_rings->rings[i];
2882 2883 rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
2883 2884 if (rs != HPI_SUCCESS) {
2884 2885 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2885 2886 			    "==> hxge_tx_port_fatal_err_recover (channel %d): "
2886 2887 "stop failed ", channel));
2887 2888
2888 2889 goto fail;
2889 2890 }
2890 2891 }
2891 2892
2892 2893 /*
2893 2894 	 * Do reclaim on all of the DMAs.
2894 2895 */
2895 2896 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming all DMA channels..."));
2896 2897 for (i = 0; i < ndmas; i++) {
2897 2898 if (tx_desc_rings[i] == NULL) {
2898 2899 continue;
2899 2900 }
2900 2901 tx_ring_p = tx_rings->rings[i];
2901 2902 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
2902 2903 }
2903 2904
2904 2905 /* Restart the TDC */
2905 2906 if ((status = hxge_txdma_hw_start(hxgep)) != HXGE_OK)
2906 2907 goto fail;
2907 2908
2908 2909 for (i = 0; i < ndmas; i++) {
2909 2910 if (tx_desc_rings[i] == NULL) {
2910 2911 continue;
2911 2912 }
2912 2913 tx_ring_p = tx_rings->rings[i];
2913 2914 MUTEX_EXIT(&tx_ring_p->lock);
2914 2915 }
2915 2916
2916 2917 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2917 2918 "Recovery Successful, TxPort Restored"));
2918 2919 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
2919 2920 "<== hxge_tx_port_fatal_err_recover"));
2920 2921 return (HXGE_OK);
2921 2922
2922 2923 fail:
2923 2924 for (i = 0; i < ndmas; i++) {
2924 2925 if (tx_desc_rings[i] == NULL) {
2925 2926 continue;
2926 2927 }
2927 2928 tx_ring_p = tx_rings->rings[i];
2928 2929 MUTEX_EXIT(&tx_ring_p->lock);
2929 2930 }
2930 2931
2931 2932 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
2932 2933 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
2933 2934 	    "hxge_tx_port_fatal_err_recover: "
2934 2935 	    "failed to recover the TxPort"));
2935 2936
2936 2937 return (status);
2937 2938 }