Print this page
Bug 5042: stop using deprecated atomic functions — replace the legacy cas32() call with atomic_cas_32() in nxge_start().
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/nxge/nxge_send.c
+++ new/usr/src/uts/common/io/nxge/nxge_send.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #include <sys/mac_provider.h>
27 27 #include <sys/nxge/nxge_impl.h>
28 28 #include <sys/nxge/nxge_hio.h>
29 29 #include <npi_tx_wr64.h>
30 30
31 31 /* Software LSO required header files */
32 32 #include <netinet/tcp.h>
33 33 #include <inet/ip_impl.h>
34 34 #include <inet/tcp.h>
35 35
36 36 extern uint64_t mac_pkt_hash(uint_t, mblk_t *mp, uint8_t policy,
37 37 boolean_t is_outbound);
38 38
39 39 static mblk_t *nxge_lso_eliminate(mblk_t *);
40 40 static mblk_t *nxge_do_softlso(mblk_t *mp, uint32_t mss);
41 41 static void nxge_lso_info_get(mblk_t *, uint32_t *, uint32_t *);
42 42 static void nxge_hcksum_retrieve(mblk_t *,
43 43 uint32_t *, uint32_t *, uint32_t *,
44 44 uint32_t *, uint32_t *);
45 45 static uint32_t nxge_csgen(uint16_t *, int);
46 46
47 47 extern uint32_t nxge_reclaim_pending;
48 48 extern uint32_t nxge_bcopy_thresh;
49 49 extern uint32_t nxge_dvma_thresh;
50 50 extern uint32_t nxge_dma_stream_thresh;
51 51 extern uint32_t nxge_tx_minfree;
52 52 extern uint32_t nxge_tx_intr_thres;
53 53 extern uint32_t nxge_tx_max_gathers;
54 54 extern uint32_t nxge_tx_tiny_pack;
55 55 extern uint32_t nxge_tx_use_bcopy;
56 56 extern nxge_tx_mode_t nxge_tx_scheme;
57 57 uint32_t nxge_lso_kick_cnt = 2;
58 58
59 59
60 60 void
61 61 nxge_tx_ring_task(void *arg)
62 62 {
63 63 p_tx_ring_t ring = (p_tx_ring_t)arg;
64 64
65 65 ASSERT(ring->tx_ring_handle != NULL);
66 66
67 67 MUTEX_ENTER(&ring->lock);
68 68 (void) nxge_txdma_reclaim(ring->nxgep, ring, 0);
69 69 MUTEX_EXIT(&ring->lock);
70 70
71 71 if (!ring->tx_ring_offline) {
72 72 mac_tx_ring_update(ring->nxgep->mach, ring->tx_ring_handle);
73 73 }
74 74 }
75 75
76 76 static void
77 77 nxge_tx_ring_dispatch(p_tx_ring_t ring)
78 78 {
79 79 /*
80 80 * Kick the ring task to reclaim some buffers.
81 81 */
82 82 (void) ddi_taskq_dispatch(ring->taskq,
83 83 nxge_tx_ring_task, (void *)ring, DDI_SLEEP);
84 84 }
85 85
86 86 mblk_t *
87 87 nxge_tx_ring_send(void *arg, mblk_t *mp)
88 88 {
89 89 p_nxge_ring_handle_t nrhp = (p_nxge_ring_handle_t)arg;
90 90 p_nxge_t nxgep;
91 91 p_tx_ring_t tx_ring_p;
92 92 int status, channel;
93 93
94 94 ASSERT(nrhp != NULL);
95 95 nxgep = nrhp->nxgep;
96 96 channel = nxgep->pt_config.hw_config.tdc.start + nrhp->index;
97 97 tx_ring_p = nxgep->tx_rings->rings[channel];
98 98
99 99 /*
100 100 * We may be in a transition from offlined DMA to onlined
101 101 * DMA.
102 102 */
103 103 if (tx_ring_p == NULL) {
104 104 ASSERT(tx_ring_p != NULL);
105 105 freemsg(mp);
106 106 return ((mblk_t *)NULL);
107 107 }
108 108
109 109 /*
110 110 * Valid DMA?
111 111 */
112 112 ASSERT(nxgep == tx_ring_p->nxgep);
113 113
114 114 /*
115 115 * Make sure DMA is not offlined.
116 116 */
117 117 if (isLDOMservice(nxgep) && tx_ring_p->tx_ring_offline) {
118 118 ASSERT(!tx_ring_p->tx_ring_offline);
119 119 freemsg(mp);
120 120 return ((mblk_t *)NULL);
121 121 }
122 122
123 123 /*
124 124 * Transmit the packet.
125 125 */
126 126 status = nxge_start(nxgep, tx_ring_p, mp);
127 127 if (status) {
128 128 nxge_tx_ring_dispatch(tx_ring_p);
129 129 return (mp);
130 130 }
131 131
132 132 return ((mblk_t *)NULL);
133 133 }
134 134
135 135 int
136 136 nxge_start(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp)
137 137 {
138 138 int dma_status, status = 0;
139 139 p_tx_desc_t tx_desc_ring_vp;
140 140 npi_handle_t npi_desc_handle;
141 141 nxge_os_dma_handle_t tx_desc_dma_handle;
142 142 p_tx_desc_t tx_desc_p;
143 143 p_tx_msg_t tx_msg_ring;
144 144 p_tx_msg_t tx_msg_p;
145 145 tx_desc_t tx_desc, *tmp_desc_p;
146 146 tx_desc_t sop_tx_desc, *sop_tx_desc_p;
147 147 p_tx_pkt_header_t hdrp;
148 148 tx_pkt_hdr_all_t tmp_hdrp;
149 149 p_tx_pkt_hdr_all_t pkthdrp;
150 150 uint8_t npads = 0;
151 151 uint64_t dma_ioaddr;
152 152 uint32_t dma_flags;
153 153 int last_bidx;
154 154 uint8_t *b_rptr;
155 155 caddr_t kaddr;
156 156 uint32_t nmblks;
157 157 uint32_t ngathers;
158 158 uint32_t clen;
159 159 int len;
160 160 uint32_t pkt_len, pack_len, min_len;
161 161 uint32_t bcopy_thresh;
162 162 int i, cur_index, sop_index;
163 163 uint16_t tail_index;
164 164 boolean_t tail_wrap = B_FALSE;
165 165 nxge_dma_common_t desc_area;
166 166 nxge_os_dma_handle_t dma_handle;
167 167 ddi_dma_cookie_t dma_cookie;
168 168 npi_handle_t npi_handle;
169 169 p_mblk_t nmp;
170 170 p_mblk_t t_mp;
171 171 uint32_t ncookies;
172 172 boolean_t good_packet;
173 173 boolean_t mark_mode = B_FALSE;
174 174 p_nxge_stats_t statsp;
175 175 p_nxge_tx_ring_stats_t tdc_stats;
176 176 t_uscalar_t start_offset = 0;
177 177 t_uscalar_t stuff_offset = 0;
178 178 t_uscalar_t end_offset = 0;
179 179 t_uscalar_t value = 0;
180 180 t_uscalar_t cksum_flags = 0;
181 181 boolean_t cksum_on = B_FALSE;
182 182 uint32_t boff = 0;
183 183 uint64_t tot_xfer_len = 0;
184 184 boolean_t header_set = B_FALSE;
185 185 #ifdef NXGE_DEBUG
186 186 p_tx_desc_t tx_desc_ring_pp;
187 187 p_tx_desc_t tx_desc_pp;
188 188 tx_desc_t *save_desc_p;
189 189 int dump_len;
190 190 int sad_len;
191 191 uint64_t sad;
192 192 int xfer_len;
193 193 uint32_t msgsize;
194 194 #endif
195 195 p_mblk_t mp_chain = NULL;
196 196 boolean_t is_lso = B_FALSE;
197 197 boolean_t lso_again;
198 198 int cur_index_lso;
199 199 p_mblk_t nmp_lso_save;
200 200 uint32_t lso_ngathers;
201 201 boolean_t lso_tail_wrap = B_FALSE;
202 202
203 203 NXGE_DEBUG_MSG((nxgep, TX_CTL,
204 204 "==> nxge_start: tx dma channel %d", tx_ring_p->tdc));
205 205 NXGE_DEBUG_MSG((nxgep, TX_CTL,
206 206 "==> nxge_start: Starting tdc %d desc pending %d",
207 207 tx_ring_p->tdc, tx_ring_p->descs_pending));
208 208
209 209 statsp = nxgep->statsp;
210 210
211 211 if (!isLDOMguest(nxgep)) {
212 212 switch (nxgep->mac.portmode) {
213 213 default:
214 214 if (nxgep->statsp->port_stats.lb_mode ==
215 215 nxge_lb_normal) {
216 216 if (!statsp->mac_stats.link_up) {
217 217 freemsg(mp);
218 218 NXGE_DEBUG_MSG((nxgep, TX_CTL,
219 219 "==> nxge_start: "
220 220 "link not up"));
221 221 goto nxge_start_fail1;
222 222 }
223 223 }
224 224 break;
225 225 case PORT_10G_FIBER:
226 226 /*
227 227 * For the following modes, check the link status
228 228 * before sending the packet out:
229 229 * nxge_lb_normal,
230 230 * nxge_lb_ext10g,
231 231 * nxge_lb_ext1000,
232 232 * nxge_lb_ext100,
233 233 * nxge_lb_ext10.
234 234 */
235 235 if (nxgep->statsp->port_stats.lb_mode <
236 236 nxge_lb_phy10g) {
237 237 if (!statsp->mac_stats.link_up) {
238 238 freemsg(mp);
239 239 NXGE_DEBUG_MSG((nxgep, TX_CTL,
240 240 "==> nxge_start: "
241 241 "link not up"));
242 242 goto nxge_start_fail1;
243 243 }
244 244 }
245 245 break;
246 246 }
247 247 }
248 248
249 249 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
250 250 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
251 251 NXGE_DEBUG_MSG((nxgep, TX_CTL,
252 252 "==> nxge_start: hardware not initialized or stopped"));
253 253 freemsg(mp);
254 254 goto nxge_start_fail1;
255 255 }
256 256
257 257 if (nxgep->soft_lso_enable) {
258 258 mp_chain = nxge_lso_eliminate(mp);
259 259 NXGE_DEBUG_MSG((nxgep, TX_CTL,
260 260 "==> nxge_start(0): LSO mp $%p mp_chain $%p",
261 261 mp, mp_chain));
262 262 if (mp_chain == NULL) {
263 263 NXGE_ERROR_MSG((nxgep, TX_CTL,
264 264 "==> nxge_send(0): NULL mp_chain $%p != mp $%p",
265 265 mp_chain, mp));
266 266 goto nxge_start_fail1;
267 267 }
268 268 if (mp_chain != mp) {
269 269 NXGE_DEBUG_MSG((nxgep, TX_CTL,
270 270 "==> nxge_send(1): IS LSO mp_chain $%p != mp $%p",
271 271 mp_chain, mp));
272 272 is_lso = B_TRUE;
273 273 mp = mp_chain;
274 274 mp_chain = mp_chain->b_next;
275 275 mp->b_next = NULL;
276 276 }
277 277 }
278 278
279 279 mac_hcksum_get(mp, &start_offset, &stuff_offset, &end_offset,
280 280 &value, &cksum_flags);
281 281 if (!NXGE_IS_VLAN_PACKET(mp->b_rptr)) {
282 282 start_offset += sizeof (ether_header_t);
283 283 stuff_offset += sizeof (ether_header_t);
284 284 } else {
285 285 start_offset += sizeof (struct ether_vlan_header);
286 286 stuff_offset += sizeof (struct ether_vlan_header);
287 287 }
288 288
289 289 if (cksum_flags & HCK_PARTIALCKSUM) {
290 290 NXGE_DEBUG_MSG((nxgep, TX_CTL,
291 291 "==> nxge_start: mp $%p len %d "
292 292 "cksum_flags 0x%x (partial checksum) ",
293 293 mp, MBLKL(mp), cksum_flags));
294 294 cksum_on = B_TRUE;
295 295 }
296 296
297 297 pkthdrp = (p_tx_pkt_hdr_all_t)&tmp_hdrp;
298 298 pkthdrp->reserved = 0;
299 299 tmp_hdrp.pkthdr.value = 0;
300 300 nxge_fill_tx_hdr(mp, B_FALSE, cksum_on,
301 301 0, 0, pkthdrp,
302 302 start_offset, stuff_offset);
303 303
304 304 lso_again = B_FALSE;
305 305 lso_ngathers = 0;
306 306
307 307 MUTEX_ENTER(&tx_ring_p->lock);
308 308
309 309 if (isLDOMservice(nxgep)) {
310 310 tx_ring_p->tx_ring_busy = B_TRUE;
311 311 if (tx_ring_p->tx_ring_offline) {
312 312 freemsg(mp);
313 313 tx_ring_p->tx_ring_busy = B_FALSE;
314 314 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline,
315 315 NXGE_TX_RING_OFFLINED);
316 316 MUTEX_EXIT(&tx_ring_p->lock);
317 317 return (status);
318 318 }
319 319 }
320 320
321 321 cur_index_lso = tx_ring_p->wr_index;
322 322 lso_tail_wrap = tx_ring_p->wr_index_wrap;
323 323 start_again:
324 324 ngathers = 0;
325 325 sop_index = tx_ring_p->wr_index;
326 326 #ifdef NXGE_DEBUG
327 327 if (tx_ring_p->descs_pending) {
328 328 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
329 329 "desc pending %d ", tx_ring_p->descs_pending));
330 330 }
331 331
332 332 dump_len = (int)(MBLKL(mp));
333 333 dump_len = (dump_len > 128) ? 128: dump_len;
334 334
335 335 NXGE_DEBUG_MSG((nxgep, TX_CTL,
336 336 "==> nxge_start: tdc %d: dumping ...: b_rptr $%p "
337 337 "(Before header reserve: ORIGINAL LEN %d)",
338 338 tx_ring_p->tdc,
339 339 mp->b_rptr,
340 340 dump_len));
341 341
342 342 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: dump packets "
343 343 "(IP ORIGINAL b_rptr $%p): %s", mp->b_rptr,
344 344 nxge_dump_packet((char *)mp->b_rptr, dump_len)));
345 345 #endif
346 346
347 347 tdc_stats = tx_ring_p->tdc_stats;
348 348 mark_mode = (tx_ring_p->descs_pending &&
349 349 (((int)tx_ring_p->tx_ring_size - (int)tx_ring_p->descs_pending) <
350 350 (int)nxge_tx_minfree));
351 351
352 352 NXGE_DEBUG_MSG((nxgep, TX_CTL,
353 353 "TX Descriptor ring is channel %d mark mode %d",
354 354 tx_ring_p->tdc, mark_mode));
355 355
356 356 if ((tx_ring_p->descs_pending + lso_ngathers) >= nxge_reclaim_pending) {
357 357 if (!nxge_txdma_reclaim(nxgep, tx_ring_p,
358 358 (nxge_tx_minfree + lso_ngathers))) {
359 359 NXGE_DEBUG_MSG((nxgep, TX_CTL,
360 360 "TX Descriptor ring is full: channel %d",
361 361 tx_ring_p->tdc));
362 362 NXGE_DEBUG_MSG((nxgep, TX_CTL,
363 363 "TX Descriptor ring is full: channel %d",
364 364 tx_ring_p->tdc));
365 365 if (is_lso) {
↓ open down ↓ |
365 lines elided |
↑ open up ↑ |
366 366 /*
367 367 * free the current mp and mp_chain if not FULL.
368 368 */
369 369 tdc_stats->tx_no_desc++;
370 370 NXGE_DEBUG_MSG((nxgep, TX_CTL,
371 371 "LSO packet: TX Descriptor ring is full: "
372 372 "channel %d",
373 373 tx_ring_p->tdc));
374 374 goto nxge_start_fail_lso;
375 375 } else {
376 - (void) cas32((uint32_t *)&tx_ring_p->queueing,
377 - 0, 1);
376 + (void) atomic_cas_32(
377 + (uint32_t *)&tx_ring_p->queueing, 0, 1);
378 378 tdc_stats->tx_no_desc++;
379 379
380 380 if (isLDOMservice(nxgep)) {
381 381 tx_ring_p->tx_ring_busy = B_FALSE;
382 382 if (tx_ring_p->tx_ring_offline) {
383 383 (void) atomic_swap_32(
384 384 &tx_ring_p->tx_ring_offline,
385 385 NXGE_TX_RING_OFFLINED);
386 386 }
387 387 }
388 388
389 389 MUTEX_EXIT(&tx_ring_p->lock);
390 390 status = 1;
391 391 goto nxge_start_fail1;
392 392 }
393 393 }
394 394 }
395 395
396 396 nmp = mp;
397 397 i = sop_index = tx_ring_p->wr_index;
398 398 nmblks = 0;
399 399 ngathers = 0;
400 400 pkt_len = 0;
401 401 pack_len = 0;
402 402 clen = 0;
403 403 last_bidx = -1;
404 404 good_packet = B_TRUE;
405 405
406 406 desc_area = tx_ring_p->tdc_desc;
407 407 npi_handle = desc_area.npi_handle;
408 408 npi_desc_handle.regh = (nxge_os_acc_handle_t)
409 409 DMA_COMMON_ACC_HANDLE(desc_area);
410 410 tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
411 411 tx_desc_dma_handle = (nxge_os_dma_handle_t)
412 412 DMA_COMMON_HANDLE(desc_area);
413 413 tx_msg_ring = tx_ring_p->tx_msg_ring;
414 414
415 415 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: wr_index %d i %d",
416 416 sop_index, i));
417 417
418 418 #ifdef NXGE_DEBUG
419 419 msgsize = msgdsize(nmp);
420 420 NXGE_DEBUG_MSG((nxgep, TX_CTL,
421 421 "==> nxge_start(1): wr_index %d i %d msgdsize %d",
422 422 sop_index, i, msgsize));
423 423 #endif
424 424 /*
425 425 * The first 16 bytes of the premapped buffer are reserved
426 426 * for header. No padding will be used.
427 427 */
428 428 pkt_len = pack_len = boff = TX_PKT_HEADER_SIZE;
429 429 if (nxge_tx_use_bcopy && (nxgep->niu_type != N2_NIU)) {
430 430 bcopy_thresh = (nxge_bcopy_thresh - TX_PKT_HEADER_SIZE);
431 431 } else {
432 432 bcopy_thresh = (TX_BCOPY_SIZE - TX_PKT_HEADER_SIZE);
433 433 }
434 434 while (nmp) {
435 435 good_packet = B_TRUE;
436 436 b_rptr = nmp->b_rptr;
437 437 len = MBLKL(nmp);
438 438 if (len <= 0) {
439 439 nmp = nmp->b_cont;
440 440 continue;
441 441 }
442 442 nmblks++;
443 443
444 444 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(1): nmblks %d "
445 445 "len %d pkt_len %d pack_len %d",
446 446 nmblks, len, pkt_len, pack_len));
447 447 /*
448 448 * Hardware limits the transfer length to 4K for NIU and
449 449 * 4076 (TX_MAX_TRANSFER_LENGTH) for Neptune. But we just
450 450 * use TX_MAX_TRANSFER_LENGTH as the limit for both.
451 451 * If len is longer than the limit, then we break nmp into
452 452 * two chunks: Make the first chunk equal to the limit and
453 453 * the second chunk for the remaining data. If the second
454 454 * chunk is still larger than the limit, then it will be
455 455 * broken into two in the next pass.
456 456 */
457 457 if (len > TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE) {
458 458 if ((t_mp = dupb(nmp)) != NULL) {
459 459 nmp->b_wptr = nmp->b_rptr +
460 460 (TX_MAX_TRANSFER_LENGTH
461 461 - TX_PKT_HEADER_SIZE);
462 462 t_mp->b_rptr = nmp->b_wptr;
463 463 t_mp->b_cont = nmp->b_cont;
464 464 nmp->b_cont = t_mp;
465 465 len = MBLKL(nmp);
466 466 } else {
467 467 if (is_lso) {
468 468 NXGE_DEBUG_MSG((nxgep, TX_CTL,
469 469 "LSO packet: dupb failed: "
470 470 "channel %d",
471 471 tx_ring_p->tdc));
472 472 mp = nmp;
473 473 goto nxge_start_fail_lso;
474 474 } else {
475 475 good_packet = B_FALSE;
476 476 goto nxge_start_fail2;
477 477 }
478 478 }
479 479 }
480 480 tx_desc.value = 0;
481 481 tx_desc_p = &tx_desc_ring_vp[i];
482 482 #ifdef NXGE_DEBUG
483 483 tx_desc_pp = &tx_desc_ring_pp[i];
484 484 #endif
485 485 tx_msg_p = &tx_msg_ring[i];
486 486 #if defined(__i386)
487 487 npi_desc_handle.regp = (uint32_t)tx_desc_p;
488 488 #else
489 489 npi_desc_handle.regp = (uint64_t)tx_desc_p;
490 490 #endif
491 491 if (!header_set &&
492 492 ((!nxge_tx_use_bcopy && (len > TX_BCOPY_SIZE)) ||
493 493 (len >= bcopy_thresh))) {
494 494 header_set = B_TRUE;
495 495 bcopy_thresh += TX_PKT_HEADER_SIZE;
496 496 boff = 0;
497 497 pack_len = 0;
498 498 kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
499 499 hdrp = (p_tx_pkt_header_t)kaddr;
500 500 clen = pkt_len;
501 501 dma_handle = tx_msg_p->buf_dma_handle;
502 502 dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
503 503 (void) ddi_dma_sync(dma_handle,
504 504 i * nxge_bcopy_thresh, nxge_bcopy_thresh,
505 505 DDI_DMA_SYNC_FORDEV);
506 506
507 507 tx_msg_p->flags.dma_type = USE_BCOPY;
508 508 goto nxge_start_control_header_only;
509 509 }
510 510
511 511 pkt_len += len;
512 512 pack_len += len;
513 513
514 514 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(3): "
515 515 "desc entry %d "
516 516 "DESC IOADDR $%p "
517 517 "desc_vp $%p tx_desc_p $%p "
518 518 "desc_pp $%p tx_desc_pp $%p "
519 519 "len %d pkt_len %d pack_len %d",
520 520 i,
521 521 DMA_COMMON_IOADDR(desc_area),
522 522 tx_desc_ring_vp, tx_desc_p,
523 523 tx_desc_ring_pp, tx_desc_pp,
524 524 len, pkt_len, pack_len));
525 525
526 526 if (len < bcopy_thresh) {
527 527 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(4): "
528 528 "USE BCOPY: "));
529 529 if (nxge_tx_tiny_pack) {
530 530 uint32_t blst =
531 531 TXDMA_DESC_NEXT_INDEX(i, -1,
532 532 tx_ring_p->tx_wrap_mask);
533 533 NXGE_DEBUG_MSG((nxgep, TX_CTL,
534 534 "==> nxge_start(5): pack"));
535 535 if ((pack_len <= bcopy_thresh) &&
536 536 (last_bidx == blst)) {
537 537 NXGE_DEBUG_MSG((nxgep, TX_CTL,
538 538 "==> nxge_start: pack(6) "
539 539 "(pkt_len %d pack_len %d)",
540 540 pkt_len, pack_len));
541 541 i = blst;
542 542 tx_desc_p = &tx_desc_ring_vp[i];
543 543 #ifdef NXGE_DEBUG
544 544 tx_desc_pp = &tx_desc_ring_pp[i];
545 545 #endif
546 546 tx_msg_p = &tx_msg_ring[i];
547 547 boff = pack_len - len;
548 548 ngathers--;
549 549 } else if (pack_len > bcopy_thresh &&
550 550 header_set) {
551 551 pack_len = len;
552 552 boff = 0;
553 553 bcopy_thresh = nxge_bcopy_thresh;
554 554 NXGE_DEBUG_MSG((nxgep, TX_CTL,
555 555 "==> nxge_start(7): > max NEW "
556 556 "bcopy thresh %d "
557 557 "pkt_len %d pack_len %d(next)",
558 558 bcopy_thresh,
559 559 pkt_len, pack_len));
560 560 }
561 561 last_bidx = i;
562 562 }
563 563 kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
564 564 if ((boff == TX_PKT_HEADER_SIZE) && (nmblks == 1)) {
565 565 hdrp = (p_tx_pkt_header_t)kaddr;
566 566 header_set = B_TRUE;
567 567 NXGE_DEBUG_MSG((nxgep, TX_CTL,
568 568 "==> nxge_start(7_x2): "
569 569 "pkt_len %d pack_len %d (new hdrp $%p)",
570 570 pkt_len, pack_len, hdrp));
571 571 }
572 572 tx_msg_p->flags.dma_type = USE_BCOPY;
573 573 kaddr += boff;
574 574 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(8): "
575 575 "USE BCOPY: before bcopy "
576 576 "DESC IOADDR $%p entry %d "
577 577 "bcopy packets %d "
578 578 "bcopy kaddr $%p "
579 579 "bcopy ioaddr (SAD) $%p "
580 580 "bcopy clen %d "
581 581 "bcopy boff %d",
582 582 DMA_COMMON_IOADDR(desc_area), i,
583 583 tdc_stats->tx_hdr_pkts,
584 584 kaddr,
585 585 dma_ioaddr,
586 586 clen,
587 587 boff));
588 588 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
589 589 "1USE BCOPY: "));
590 590 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
591 591 "2USE BCOPY: "));
592 592 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
593 593 "last USE BCOPY: copy from b_rptr $%p "
594 594 "to KADDR $%p (len %d offset %d",
595 595 b_rptr, kaddr, len, boff));
596 596
597 597 bcopy(b_rptr, kaddr, len);
598 598
599 599 #ifdef NXGE_DEBUG
600 600 dump_len = (len > 128) ? 128: len;
601 601 NXGE_DEBUG_MSG((nxgep, TX_CTL,
602 602 "==> nxge_start: dump packets "
603 603 "(After BCOPY len %d)"
604 604 "(b_rptr $%p): %s", len, nmp->b_rptr,
605 605 nxge_dump_packet((char *)nmp->b_rptr,
606 606 dump_len)));
607 607 #endif
608 608
609 609 dma_handle = tx_msg_p->buf_dma_handle;
610 610 dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
611 611 (void) ddi_dma_sync(dma_handle,
612 612 i * nxge_bcopy_thresh, nxge_bcopy_thresh,
613 613 DDI_DMA_SYNC_FORDEV);
614 614 clen = len + boff;
615 615 tdc_stats->tx_hdr_pkts++;
616 616 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(9): "
617 617 "USE BCOPY: "
618 618 "DESC IOADDR $%p entry %d "
619 619 "bcopy packets %d "
620 620 "bcopy kaddr $%p "
621 621 "bcopy ioaddr (SAD) $%p "
622 622 "bcopy clen %d "
623 623 "bcopy boff %d",
624 624 DMA_COMMON_IOADDR(desc_area),
625 625 i,
626 626 tdc_stats->tx_hdr_pkts,
627 627 kaddr,
628 628 dma_ioaddr,
629 629 clen,
630 630 boff));
631 631 } else {
632 632 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(12): "
633 633 "USE DVMA: len %d", len));
634 634 tx_msg_p->flags.dma_type = USE_DMA;
635 635 dma_flags = DDI_DMA_WRITE;
636 636 if (len < nxge_dma_stream_thresh) {
637 637 dma_flags |= DDI_DMA_CONSISTENT;
638 638 } else {
639 639 dma_flags |= DDI_DMA_STREAMING;
640 640 }
641 641
642 642 dma_handle = tx_msg_p->dma_handle;
643 643 dma_status = ddi_dma_addr_bind_handle(dma_handle, NULL,
644 644 (caddr_t)b_rptr, len, dma_flags,
645 645 DDI_DMA_DONTWAIT, NULL,
646 646 &dma_cookie, &ncookies);
647 647 if (dma_status == DDI_DMA_MAPPED) {
648 648 dma_ioaddr = dma_cookie.dmac_laddress;
649 649 len = (int)dma_cookie.dmac_size;
650 650 clen = (uint32_t)dma_cookie.dmac_size;
651 651 NXGE_DEBUG_MSG((nxgep, TX_CTL,
652 652 "==> nxge_start(12_1): "
653 653 "USE DVMA: len %d clen %d "
654 654 "ngathers %d",
655 655 len, clen,
656 656 ngathers));
657 657 #if defined(__i386)
658 658 npi_desc_handle.regp = (uint32_t)tx_desc_p;
659 659 #else
660 660 npi_desc_handle.regp = (uint64_t)tx_desc_p;
661 661 #endif
662 662 while (ncookies > 1) {
663 663 ngathers++;
664 664 /*
665 665 * this is the fix for multiple
666 666 * cookies, which are basically
667 667 * a descriptor entry, we don't set
668 668 * SOP bit as well as related fields
669 669 */
670 670
671 671 (void) npi_txdma_desc_gather_set(
672 672 npi_desc_handle,
673 673 &tx_desc,
674 674 (ngathers -1),
675 675 mark_mode,
676 676 ngathers,
677 677 dma_ioaddr,
678 678 clen);
679 679
680 680 tx_msg_p->tx_msg_size = clen;
681 681 NXGE_DEBUG_MSG((nxgep, TX_CTL,
682 682 "==> nxge_start: DMA "
683 683 "ncookie %d "
684 684 "ngathers %d "
685 685 "dma_ioaddr $%p len %d"
686 686 "desc $%p descp $%p (%d)",
687 687 ncookies,
688 688 ngathers,
689 689 dma_ioaddr, clen,
690 690 *tx_desc_p, tx_desc_p, i));
691 691
692 692 ddi_dma_nextcookie(dma_handle,
693 693 &dma_cookie);
694 694 dma_ioaddr =
695 695 dma_cookie.dmac_laddress;
696 696
697 697 len = (int)dma_cookie.dmac_size;
698 698 clen = (uint32_t)dma_cookie.dmac_size;
699 699 NXGE_DEBUG_MSG((nxgep, TX_CTL,
700 700 "==> nxge_start(12_2): "
701 701 "USE DVMA: len %d clen %d ",
702 702 len, clen));
703 703
704 704 i = TXDMA_DESC_NEXT_INDEX(i, 1,
705 705 tx_ring_p->tx_wrap_mask);
706 706 tx_desc_p = &tx_desc_ring_vp[i];
707 707
708 708 #if defined(__i386)
709 709 npi_desc_handle.regp =
710 710 (uint32_t)tx_desc_p;
711 711 #else
712 712 npi_desc_handle.regp =
713 713 (uint64_t)tx_desc_p;
714 714 #endif
715 715 tx_msg_p = &tx_msg_ring[i];
716 716 tx_msg_p->flags.dma_type = USE_NONE;
717 717 tx_desc.value = 0;
718 718
719 719 ncookies--;
720 720 }
721 721 tdc_stats->tx_ddi_pkts++;
722 722 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start:"
723 723 "DMA: ddi packets %d",
724 724 tdc_stats->tx_ddi_pkts));
725 725 } else {
726 726 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
727 727 "dma mapping failed for %d "
728 728 "bytes addr $%p flags %x (%d)",
729 729 len, b_rptr, status, status));
730 730 good_packet = B_FALSE;
731 731 tdc_stats->tx_dma_bind_fail++;
732 732 tx_msg_p->flags.dma_type = USE_NONE;
733 733 if (is_lso) {
734 734 mp = nmp;
735 735 goto nxge_start_fail_lso;
736 736 } else {
737 737 status = 1;
738 738 goto nxge_start_fail2;
739 739 }
740 740 }
741 741 } /* ddi dvma */
742 742
743 743 if (is_lso) {
744 744 nmp_lso_save = nmp;
745 745 }
746 746 nmp = nmp->b_cont;
747 747 nxge_start_control_header_only:
748 748 #if defined(__i386)
749 749 npi_desc_handle.regp = (uint32_t)tx_desc_p;
750 750 #else
751 751 npi_desc_handle.regp = (uint64_t)tx_desc_p;
752 752 #endif
753 753 ngathers++;
754 754
755 755 if (ngathers == 1) {
756 756 #ifdef NXGE_DEBUG
757 757 save_desc_p = &sop_tx_desc;
758 758 #endif
759 759 sop_tx_desc_p = &sop_tx_desc;
760 760 sop_tx_desc_p->value = 0;
761 761 sop_tx_desc_p->bits.hdw.tr_len = clen;
762 762 sop_tx_desc_p->bits.hdw.sad = dma_ioaddr >> 32;
763 763 sop_tx_desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;
764 764 } else {
765 765 #ifdef NXGE_DEBUG
766 766 save_desc_p = &tx_desc;
767 767 #endif
768 768 tmp_desc_p = &tx_desc;
769 769 tmp_desc_p->value = 0;
770 770 tmp_desc_p->bits.hdw.tr_len = clen;
771 771 tmp_desc_p->bits.hdw.sad = dma_ioaddr >> 32;
772 772 tmp_desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;
773 773
774 774 tx_desc_p->value = tmp_desc_p->value;
775 775 }
776 776
777 777 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(13): "
778 778 "Desc_entry %d ngathers %d "
779 779 "desc_vp $%p tx_desc_p $%p "
780 780 "len %d clen %d pkt_len %d pack_len %d nmblks %d "
781 781 "dma_ioaddr (SAD) $%p mark %d",
782 782 i, ngathers,
783 783 tx_desc_ring_vp, tx_desc_p,
784 784 len, clen, pkt_len, pack_len, nmblks,
785 785 dma_ioaddr, mark_mode));
786 786
787 787 #ifdef NXGE_DEBUG
788 788 npi_desc_handle.nxgep = nxgep;
789 789 npi_desc_handle.function.function = nxgep->function_num;
790 790 npi_desc_handle.function.instance = nxgep->instance;
791 791 sad = (save_desc_p->value & TX_PKT_DESC_SAD_MASK);
792 792 xfer_len = ((save_desc_p->value & TX_PKT_DESC_TR_LEN_MASK) >>
793 793 TX_PKT_DESC_TR_LEN_SHIFT);
794 794
795 795
796 796 NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\t: value 0x%llx\n"
797 797 "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\t"
798 798 "mark %d sop %d\n",
799 799 save_desc_p->value,
800 800 sad,
801 801 save_desc_p->bits.hdw.tr_len,
802 802 xfer_len,
803 803 save_desc_p->bits.hdw.num_ptr,
804 804 save_desc_p->bits.hdw.mark,
805 805 save_desc_p->bits.hdw.sop));
806 806
807 807 npi_txdma_dump_desc_one(npi_desc_handle, NULL, i);
808 808 #endif
809 809
810 810 tx_msg_p->tx_msg_size = clen;
811 811 i = TXDMA_DESC_NEXT_INDEX(i, 1, tx_ring_p->tx_wrap_mask);
812 812 if (ngathers > nxge_tx_max_gathers) {
813 813 good_packet = B_FALSE;
814 814 mac_hcksum_get(mp, &start_offset,
815 815 &stuff_offset, &end_offset, &value,
816 816 &cksum_flags);
817 817
818 818 NXGE_DEBUG_MSG((NULL, TX_CTL,
819 819 "==> nxge_start(14): pull msg - "
820 820 "len %d pkt_len %d ngathers %d",
821 821 len, pkt_len, ngathers));
822 822
823 823 /*
824 824 * Just give up on this packet.
825 825 */
826 826 if (is_lso) {
827 827 mp = nmp_lso_save;
828 828 goto nxge_start_fail_lso;
829 829 }
830 830 status = 0;
831 831 goto nxge_start_fail2;
832 832 }
833 833 } /* while (nmp) */
834 834
835 835 tx_msg_p->tx_message = mp;
836 836 tx_desc_p = &tx_desc_ring_vp[sop_index];
837 837 #if defined(__i386)
838 838 npi_desc_handle.regp = (uint32_t)tx_desc_p;
839 839 #else
840 840 npi_desc_handle.regp = (uint64_t)tx_desc_p;
841 841 #endif
842 842
843 843 pkthdrp = (p_tx_pkt_hdr_all_t)hdrp;
844 844 pkthdrp->reserved = 0;
845 845 hdrp->value = 0;
846 846 bcopy(&tmp_hdrp, hdrp, sizeof (tx_pkt_header_t));
847 847
848 848 if (pkt_len > NXGE_MTU_DEFAULT_MAX) {
849 849 tdc_stats->tx_jumbo_pkts++;
850 850 }
851 851
852 852 min_len = (ETHERMIN + TX_PKT_HEADER_SIZE + (npads * 2));
853 853 if (pkt_len < min_len) {
854 854 /* Assume we use bcopy to premapped buffers */
855 855 kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
856 856 NXGE_DEBUG_MSG((NULL, TX_CTL,
857 857 "==> nxge_start(14-1): < (msg_min + 16)"
858 858 "len %d pkt_len %d min_len %d bzero %d ngathers %d",
859 859 len, pkt_len, min_len, (min_len - pkt_len), ngathers));
860 860 bzero((kaddr + pkt_len), (min_len - pkt_len));
861 861 pkt_len = tx_msg_p->tx_msg_size = min_len;
862 862
863 863 sop_tx_desc_p->bits.hdw.tr_len = min_len;
864 864
865 865 NXGE_MEM_PIO_WRITE64(npi_desc_handle, sop_tx_desc_p->value);
866 866 tx_desc_p->value = sop_tx_desc_p->value;
867 867
868 868 NXGE_DEBUG_MSG((NULL, TX_CTL,
869 869 "==> nxge_start(14-2): < msg_min - "
870 870 "len %d pkt_len %d min_len %d ngathers %d",
871 871 len, pkt_len, min_len, ngathers));
872 872 }
873 873
874 874 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: cksum_flags 0x%x ",
875 875 cksum_flags));
876 876 {
877 877 uint64_t tmp_len;
878 878
879 879 /* pkt_len already includes 16 + paddings!! */
880 880 /* Update the control header length */
881 881 tot_xfer_len = (pkt_len - TX_PKT_HEADER_SIZE);
882 882 tmp_len = hdrp->value |
883 883 (tot_xfer_len << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
884 884
885 885 NXGE_DEBUG_MSG((nxgep, TX_CTL,
886 886 "==> nxge_start(15_x1): setting SOP "
887 887 "tot_xfer_len 0x%llx (%d) pkt_len %d tmp_len "
888 888 "0x%llx hdrp->value 0x%llx",
889 889 tot_xfer_len, tot_xfer_len, pkt_len,
890 890 tmp_len, hdrp->value));
891 891 #if defined(_BIG_ENDIAN)
892 892 hdrp->value = ddi_swap64(tmp_len);
893 893 #else
894 894 hdrp->value = tmp_len;
895 895 #endif
896 896 NXGE_DEBUG_MSG((nxgep,
897 897 TX_CTL, "==> nxge_start(15_x2): setting SOP "
898 898 "after SWAP: tot_xfer_len 0x%llx pkt_len %d "
899 899 "tmp_len 0x%llx hdrp->value 0x%llx",
900 900 tot_xfer_len, pkt_len,
901 901 tmp_len, hdrp->value));
902 902 }
903 903
904 904 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(15): setting SOP "
905 905 "wr_index %d "
906 906 "tot_xfer_len (%d) pkt_len %d npads %d",
907 907 sop_index,
908 908 tot_xfer_len, pkt_len,
909 909 npads));
910 910
911 911 sop_tx_desc_p->bits.hdw.sop = 1;
912 912 sop_tx_desc_p->bits.hdw.mark = mark_mode;
913 913 sop_tx_desc_p->bits.hdw.num_ptr = ngathers;
914 914
915 915 NXGE_MEM_PIO_WRITE64(npi_desc_handle, sop_tx_desc_p->value);
916 916
917 917 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(16): set SOP done"));
918 918
919 919 #ifdef NXGE_DEBUG
920 920 npi_desc_handle.nxgep = nxgep;
921 921 npi_desc_handle.function.function = nxgep->function_num;
922 922 npi_desc_handle.function.instance = nxgep->instance;
923 923
924 924 NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\t: value 0x%llx\n"
925 925 "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
926 926 save_desc_p->value,
927 927 sad,
928 928 save_desc_p->bits.hdw.tr_len,
929 929 xfer_len,
930 930 save_desc_p->bits.hdw.num_ptr,
931 931 save_desc_p->bits.hdw.mark,
932 932 save_desc_p->bits.hdw.sop));
933 933 (void) npi_txdma_dump_desc_one(npi_desc_handle, NULL, sop_index);
934 934
935 935 dump_len = (pkt_len > 128) ? 128: pkt_len;
936 936 NXGE_DEBUG_MSG((nxgep, TX_CTL,
937 937 "==> nxge_start: dump packets(17) (after sop set, len "
938 938 " (len/dump_len/pkt_len/tot_xfer_len) %d/%d/%d/%d):\n"
939 939 "ptr $%p: %s", len, dump_len, pkt_len, tot_xfer_len,
940 940 (char *)hdrp,
941 941 nxge_dump_packet((char *)hdrp, dump_len)));
942 942 NXGE_DEBUG_MSG((nxgep, TX_CTL,
943 943 "==> nxge_start(18): TX desc sync: sop_index %d",
944 944 sop_index));
945 945 #endif
946 946
947 947 if ((ngathers == 1) || tx_ring_p->wr_index < i) {
948 948 (void) ddi_dma_sync(tx_desc_dma_handle,
949 949 sop_index * sizeof (tx_desc_t),
950 950 ngathers * sizeof (tx_desc_t),
951 951 DDI_DMA_SYNC_FORDEV);
952 952
953 953 NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(19): sync 1 "
954 954 "cs_off = 0x%02X cs_s_off = 0x%02X "
955 955 "pkt_len %d ngathers %d sop_index %d\n",
956 956 stuff_offset, start_offset,
957 957 pkt_len, ngathers, sop_index));
958 958 } else { /* more than one descriptor and wrap around */
959 959 uint32_t nsdescs = tx_ring_p->tx_ring_size - sop_index;
960 960 (void) ddi_dma_sync(tx_desc_dma_handle,
961 961 sop_index * sizeof (tx_desc_t),
962 962 nsdescs * sizeof (tx_desc_t),
963 963 DDI_DMA_SYNC_FORDEV);
964 964 NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(20): sync 1 "
965 965 "cs_off = 0x%02X cs_s_off = 0x%02X "
966 966 "pkt_len %d ngathers %d sop_index %d\n",
967 967 stuff_offset, start_offset,
968 968 pkt_len, ngathers, sop_index));
969 969
970 970 (void) ddi_dma_sync(tx_desc_dma_handle,
971 971 0,
972 972 (ngathers - nsdescs) * sizeof (tx_desc_t),
973 973 DDI_DMA_SYNC_FORDEV);
974 974 NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(21): sync 2 "
975 975 "cs_off = 0x%02X cs_s_off = 0x%02X "
976 976 "pkt_len %d ngathers %d sop_index %d\n",
977 977 stuff_offset, start_offset,
978 978 pkt_len, ngathers, sop_index));
979 979 }
980 980
981 981 tail_index = tx_ring_p->wr_index;
982 982 tail_wrap = tx_ring_p->wr_index_wrap;
983 983
984 984 tx_ring_p->wr_index = i;
985 985 if (tx_ring_p->wr_index <= tail_index) {
986 986 tx_ring_p->wr_index_wrap = ((tail_wrap == B_TRUE) ?
987 987 B_FALSE : B_TRUE);
988 988 }
989 989
990 990 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: TX kick: "
991 991 "channel %d wr_index %d wrap %d ngathers %d desc_pend %d",
992 992 tx_ring_p->tdc,
993 993 tx_ring_p->wr_index,
994 994 tx_ring_p->wr_index_wrap,
995 995 ngathers,
996 996 tx_ring_p->descs_pending));
997 997
998 998 if (is_lso) {
999 999 lso_ngathers += ngathers;
1000 1000 if (mp_chain != NULL) {
1001 1001 mp = mp_chain;
1002 1002 mp_chain = mp_chain->b_next;
1003 1003 mp->b_next = NULL;
1004 1004 if (nxge_lso_kick_cnt == lso_ngathers) {
1005 1005 tx_ring_p->descs_pending += lso_ngathers;
1006 1006 {
1007 1007 tx_ring_kick_t kick;
1008 1008
1009 1009 kick.value = 0;
1010 1010 kick.bits.ldw.wrap =
1011 1011 tx_ring_p->wr_index_wrap;
1012 1012 kick.bits.ldw.tail =
1013 1013 (uint16_t)tx_ring_p->wr_index;
1014 1014
1015 1015 /* Kick the Transmit kick register */
1016 1016 TXDMA_REG_WRITE64(
1017 1017 NXGE_DEV_NPI_HANDLE(nxgep),
1018 1018 TX_RING_KICK_REG,
1019 1019 (uint8_t)tx_ring_p->tdc,
1020 1020 kick.value);
1021 1021 tdc_stats->tx_starts++;
1022 1022
1023 1023 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1024 1024 "==> nxge_start: more LSO: "
1025 1025 "LSO_CNT %d",
1026 1026 lso_ngathers));
1027 1027 }
1028 1028 lso_ngathers = 0;
1029 1029 ngathers = 0;
1030 1030 cur_index_lso = sop_index = tx_ring_p->wr_index;
1031 1031 lso_tail_wrap = tx_ring_p->wr_index_wrap;
1032 1032 }
1033 1033 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1034 1034 "==> nxge_start: lso again: "
1035 1035 "lso_gathers %d ngathers %d cur_index_lso %d "
1036 1036 "wr_index %d sop_index %d",
1037 1037 lso_ngathers, ngathers, cur_index_lso,
1038 1038 tx_ring_p->wr_index, sop_index));
1039 1039
1040 1040 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1041 1041 "==> nxge_start: next : count %d",
1042 1042 lso_ngathers));
1043 1043 lso_again = B_TRUE;
1044 1044 goto start_again;
1045 1045 }
1046 1046 ngathers = lso_ngathers;
1047 1047 }
1048 1048
1049 1049 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: TX KICKING: "));
1050 1050
1051 1051 {
1052 1052 tx_ring_kick_t kick;
1053 1053
1054 1054 kick.value = 0;
1055 1055 kick.bits.ldw.wrap = tx_ring_p->wr_index_wrap;
1056 1056 kick.bits.ldw.tail = (uint16_t)tx_ring_p->wr_index;
1057 1057
1058 1058 /* Kick start the Transmit kick register */
1059 1059 TXDMA_REG_WRITE64(NXGE_DEV_NPI_HANDLE(nxgep),
1060 1060 TX_RING_KICK_REG,
1061 1061 (uint8_t)tx_ring_p->tdc,
1062 1062 kick.value);
1063 1063 }
1064 1064
1065 1065 tx_ring_p->descs_pending += ngathers;
1066 1066 tdc_stats->tx_starts++;
1067 1067
1068 1068 if (isLDOMservice(nxgep)) {
1069 1069 tx_ring_p->tx_ring_busy = B_FALSE;
1070 1070 if (tx_ring_p->tx_ring_offline) {
1071 1071 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline,
1072 1072 NXGE_TX_RING_OFFLINED);
1073 1073 }
1074 1074 }
1075 1075
1076 1076 MUTEX_EXIT(&tx_ring_p->lock);
1077 1077
1078 1078 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_start"));
1079 1079 return (status);
1080 1080
1081 1081 nxge_start_fail_lso:
1082 1082 status = 0;
1083 1083 good_packet = B_FALSE;
1084 1084 if (mp != NULL)
1085 1085 freemsg(mp);
1086 1086 if (mp_chain != NULL)
1087 1087 freemsgchain(mp_chain);
1088 1088
1089 1089 if (!lso_again && !ngathers) {
1090 1090 if (isLDOMservice(nxgep)) {
1091 1091 tx_ring_p->tx_ring_busy = B_FALSE;
1092 1092 if (tx_ring_p->tx_ring_offline) {
1093 1093 (void) atomic_swap_32(
1094 1094 &tx_ring_p->tx_ring_offline,
1095 1095 NXGE_TX_RING_OFFLINED);
1096 1096 }
1097 1097 }
1098 1098
1099 1099 MUTEX_EXIT(&tx_ring_p->lock);
1100 1100 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1101 1101 "==> nxge_start: lso exit (nothing changed)"));
1102 1102 goto nxge_start_fail1;
1103 1103 }
1104 1104
1105 1105 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1106 1106 "==> nxge_start (channel %d): before lso "
1107 1107 "lso_gathers %d ngathers %d cur_index_lso %d "
1108 1108 "wr_index %d sop_index %d lso_again %d",
1109 1109 tx_ring_p->tdc,
1110 1110 lso_ngathers, ngathers, cur_index_lso,
1111 1111 tx_ring_p->wr_index, sop_index, lso_again));
1112 1112
1113 1113 if (lso_again) {
1114 1114 lso_ngathers += ngathers;
1115 1115 ngathers = lso_ngathers;
1116 1116 sop_index = cur_index_lso;
1117 1117 tx_ring_p->wr_index = sop_index;
1118 1118 tx_ring_p->wr_index_wrap = lso_tail_wrap;
1119 1119 }
1120 1120
1121 1121 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1122 1122 "==> nxge_start (channel %d): after lso "
1123 1123 "lso_gathers %d ngathers %d cur_index_lso %d "
1124 1124 "wr_index %d sop_index %d lso_again %d",
1125 1125 tx_ring_p->tdc,
1126 1126 lso_ngathers, ngathers, cur_index_lso,
1127 1127 tx_ring_p->wr_index, sop_index, lso_again));
1128 1128
1129 1129 nxge_start_fail2:
1130 1130 if (good_packet == B_FALSE) {
1131 1131 cur_index = sop_index;
1132 1132 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: clean up"));
1133 1133 for (i = 0; i < ngathers; i++) {
1134 1134 tx_desc_p = &tx_desc_ring_vp[cur_index];
1135 1135 #if defined(__i386)
1136 1136 npi_handle.regp = (uint32_t)tx_desc_p;
1137 1137 #else
1138 1138 npi_handle.regp = (uint64_t)tx_desc_p;
1139 1139 #endif
1140 1140 tx_msg_p = &tx_msg_ring[cur_index];
1141 1141 (void) npi_txdma_desc_set_zero(npi_handle, 1);
1142 1142 if (tx_msg_p->flags.dma_type == USE_DVMA) {
1143 1143 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1144 1144 "tx_desc_p = %X index = %d",
1145 1145 tx_desc_p, tx_ring_p->rd_index));
1146 1146 (void) dvma_unload(tx_msg_p->dvma_handle,
1147 1147 0, -1);
1148 1148 tx_msg_p->dvma_handle = NULL;
1149 1149 if (tx_ring_p->dvma_wr_index ==
1150 1150 tx_ring_p->dvma_wrap_mask)
1151 1151 tx_ring_p->dvma_wr_index = 0;
1152 1152 else
1153 1153 tx_ring_p->dvma_wr_index++;
1154 1154 tx_ring_p->dvma_pending--;
1155 1155 } else if (tx_msg_p->flags.dma_type == USE_DMA) {
1156 1156 if (ddi_dma_unbind_handle(
1157 1157 tx_msg_p->dma_handle)) {
1158 1158 cmn_err(CE_WARN, "!nxge_start: "
1159 1159 "ddi_dma_unbind_handle failed");
1160 1160 }
1161 1161 }
1162 1162 tx_msg_p->flags.dma_type = USE_NONE;
1163 1163 cur_index = TXDMA_DESC_NEXT_INDEX(cur_index, 1,
1164 1164 tx_ring_p->tx_wrap_mask);
1165 1165
1166 1166 }
1167 1167 }
1168 1168
1169 1169 if (isLDOMservice(nxgep)) {
1170 1170 tx_ring_p->tx_ring_busy = B_FALSE;
1171 1171 if (tx_ring_p->tx_ring_offline) {
1172 1172 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline,
1173 1173 NXGE_TX_RING_OFFLINED);
1174 1174 }
1175 1175 }
1176 1176
1177 1177 MUTEX_EXIT(&tx_ring_p->lock);
1178 1178
1179 1179 nxge_start_fail1:
1180 1180 /* Add FMA to check the access handle nxge_hregh */
1181 1181
1182 1182 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_start"));
1183 1183 return (status);
1184 1184 }
1185 1185
1186 1186 /* Software LSO starts here */
1187 1187 static void
1188 1188 nxge_hcksum_retrieve(mblk_t *mp,
1189 1189 uint32_t *start, uint32_t *stuff, uint32_t *end,
1190 1190 uint32_t *value, uint32_t *flags)
1191 1191 {
1192 1192 if (mp->b_datap->db_type == M_DATA) {
1193 1193 if (flags != NULL) {
1194 1194 *flags = DB_CKSUMFLAGS(mp) & (HCK_IPV4_HDRCKSUM |
1195 1195 HCK_PARTIALCKSUM | HCK_FULLCKSUM |
1196 1196 HCK_FULLCKSUM_OK);
1197 1197 if ((*flags & (HCK_PARTIALCKSUM |
1198 1198 HCK_FULLCKSUM)) != 0) {
1199 1199 if (value != NULL)
1200 1200 *value = (uint32_t)DB_CKSUM16(mp);
1201 1201 if ((*flags & HCK_PARTIALCKSUM) != 0) {
1202 1202 if (start != NULL)
1203 1203 *start =
1204 1204 (uint32_t)DB_CKSUMSTART(mp);
1205 1205 if (stuff != NULL)
1206 1206 *stuff =
1207 1207 (uint32_t)DB_CKSUMSTUFF(mp);
1208 1208 if (end != NULL)
1209 1209 *end =
1210 1210 (uint32_t)DB_CKSUMEND(mp);
1211 1211 }
1212 1212 }
1213 1213 }
1214 1214 }
1215 1215 }
1216 1216
1217 1217 static void
1218 1218 nxge_lso_info_get(mblk_t *mp, uint32_t *mss, uint32_t *flags)
1219 1219 {
1220 1220 ASSERT(DB_TYPE(mp) == M_DATA);
1221 1221
1222 1222 *mss = 0;
1223 1223 if (flags != NULL) {
1224 1224 *flags = DB_CKSUMFLAGS(mp) & HW_LSO;
1225 1225 if ((*flags != 0) && (mss != NULL)) {
1226 1226 *mss = (uint32_t)DB_LSOMSS(mp);
1227 1227 }
1228 1228 NXGE_DEBUG_MSG((NULL, TX_CTL,
1229 1229 "==> nxge_lso_info_get(flag !=NULL): mss %d *flags 0x%x",
1230 1230 *mss, *flags));
1231 1231 }
1232 1232
1233 1233 NXGE_DEBUG_MSG((NULL, TX_CTL,
1234 1234 "<== nxge_lso_info_get: mss %d", *mss));
1235 1235 }
1236 1236
/*
 * Do Soft LSO on the oversized packet.
 *
 * 1. Create a chain of message for headers.
 * 2. Fill up header messages with proper information.
 * 3. Copy Ethernet, IP, and TCP headers from the original message to
 *    each new message with necessary adjustments.
 *    * Leave the Ethernet header unchanged for DIX frames. (by default)
 *    * IP Total Length field is updated to MSS or less(only for the last one).
 *    * IP Identification value is incremented by one for each packet.
 *    * TCP sequence Number is recalculated according to the payload length.
 *    * Set FIN and/or PSH flags for the *last* packet if applied.
 *    * TCP partial Checksum
 * 4. Update LSO information in the first message header.
 * 5. Release the original message header.
 *
 * Returns the new message chain (linked via b_next), or NULL on any
 * allocation failure - in which case the original message is freed too.
 */
static mblk_t *
nxge_do_softlso(mblk_t *mp, uint32_t mss)
{
	uint32_t hckflags;
	int pktlen;
	int hdrlen;
	int segnum;
	int i;
	struct ether_vlan_header *evh;
	int ehlen, iphlen, tcphlen;
	struct ip *oiph, *niph;
	struct tcphdr *otcph, *ntcph;
	int available, len, left;
	uint16_t ip_id;
	uint32_t tcp_seq;
#ifdef __sparc
	uint32_t tcp_seq_tmp;
#endif
	mblk_t *datamp;
	uchar_t *rptr;
	mblk_t *nmp;
	mblk_t *cmp;
	mblk_t *mp_chain;
	boolean_t do_cleanup = B_FALSE;
	t_uscalar_t start_offset = 0;
	t_uscalar_t stuff_offset = 0;
	t_uscalar_t value = 0;
	uint16_t l4_len;
	ipaddr_t src, dst;
	uint32_t cksum, sum, l4cksum;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso"));
	/*
	 * check the length of LSO packet payload and calculate the number of
	 * segments to be generated.
	 */
	pktlen = msgsize(mp);
	evh = (struct ether_vlan_header *)mp->b_rptr;

	/* VLAN? */
	if (evh->ether_tpid == htons(ETHERTYPE_VLAN))
		ehlen = sizeof (struct ether_vlan_header);
	else
		ehlen = sizeof (struct ether_header);
	oiph = (struct ip *)(mp->b_rptr + ehlen);
	iphlen = oiph->ip_hl * 4;
	otcph = (struct tcphdr *)(mp->b_rptr + ehlen + iphlen);
	tcphlen = otcph->th_off * 4;

	/*
	 * l4_len here is the TCP header plus payload length (everything
	 * past the IP header); it is recomputed per-segment later.
	 */
	l4_len = pktlen - ehlen - iphlen;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: mss %d oiph $%p "
	    "original ip_sum oiph->ip_sum 0x%x "
	    "original tcp_sum otcph->th_sum 0x%x "
	    "oiph->ip_len %d pktlen %d ehlen %d "
	    "l4_len %d (0x%x) ip_len - iphlen %d ",
	    mss,
	    oiph,
	    oiph->ip_sum,
	    otcph->th_sum,
	    ntohs(oiph->ip_len), pktlen,
	    ehlen,
	    l4_len,
	    l4_len,
	    ntohs(oiph->ip_len) - iphlen));

	/*
	 * Sanity checks: only a well-formed IPv4/TCP frame whose IP total
	 * length agrees with the actual message size can be segmented.
	 * Anything else is dropped (freed) and NULL returned.
	 */
	/* IPv4 + TCP */
	if (!(oiph->ip_v == IPV4_VERSION)) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_do_softlso: not IPV4 "
		    "oiph->ip_len %d pktlen %d ehlen %d tcphlen %d",
		    ntohs(oiph->ip_len), pktlen, ehlen,
		    tcphlen));
		freemsg(mp);
		return (NULL);
	}

	if (!(oiph->ip_p == IPPROTO_TCP)) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_do_softlso: not TCP "
		    "oiph->ip_len %d pktlen %d ehlen %d tcphlen %d",
		    ntohs(oiph->ip_len), pktlen, ehlen,
		    tcphlen));
		freemsg(mp);
		return (NULL);
	}

	if (!(ntohs(oiph->ip_len) == pktlen - ehlen)) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_do_softlso: len not matched "
		    "oiph->ip_len %d pktlen %d ehlen %d tcphlen %d",
		    ntohs(oiph->ip_len), pktlen, ehlen,
		    tcphlen));
		freemsg(mp);
		return (NULL);
	}

	otcph = (struct tcphdr *)(mp->b_rptr + ehlen + iphlen);
	tcphlen = otcph->th_off * 4;

	/* TCP flags can not include URG, RST, or SYN */
	VERIFY((otcph->th_flags & (TH_SYN | TH_RST | TH_URG)) == 0);

	hdrlen = ehlen + iphlen + tcphlen;

	/* The full header stack must live in the first mblk. */
	VERIFY(MBLKL(mp) >= hdrlen);

	/*
	 * Locate the start of the payload: either inside the first mblk
	 * (right after the headers) or at the head of the next mblk when
	 * the first mblk holds exactly the headers.
	 */
	if (MBLKL(mp) > hdrlen) {
		datamp = mp;
		rptr = mp->b_rptr + hdrlen;
	} else { /* = */
		datamp = mp->b_cont;
		rptr = datamp->b_rptr;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "nxge_do_softlso: otcph $%p pktlen: %d, "
	    "hdrlen %d ehlen %d iphlen %d tcphlen %d "
	    "mblkl(mp): %d, mblkl(datamp): %d",
	    otcph,
	    pktlen, hdrlen, ehlen, iphlen, tcphlen,
	    (int)MBLKL(mp), (int)MBLKL(datamp)));

	hckflags = 0;
	nxge_hcksum_retrieve(mp,
	    &start_offset, &stuff_offset, &value, NULL, &hckflags);

	dst = oiph->ip_dst.s_addr;
	src = oiph->ip_src.s_addr;

	/*
	 * Precompute the invariant part of the TCP pseudo-header checksum
	 * (source + destination address, folded to 16-bit words, plus the
	 * protocol constant).  Only the varying length term is added per
	 * segment below.
	 */
	cksum = (dst >> 16) + (dst & 0xFFFF) +
	    (src >> 16) + (src & 0xFFFF);
	l4cksum = cksum + IP_TCP_CSUM_COMP;

	sum = l4_len + l4cksum;
	sum = (sum & 0xFFFF) + (sum >> 16);

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: dst 0x%x src 0x%x sum 0x%x ~new 0x%x "
	    "hckflags 0x%x start_offset %d stuff_offset %d "
	    "value (original) 0x%x th_sum 0x%x "
	    "pktlen %d l4_len %d (0x%x) "
	    "MBLKL(mp): %d, MBLKL(datamp): %d dump header %s",
	    dst, src,
	    (sum & 0xffff), (~sum & 0xffff),
	    hckflags, start_offset, stuff_offset,
	    value, otcph->th_sum,
	    pktlen,
	    l4_len,
	    l4_len,
	    ntohs(oiph->ip_len) - (int)MBLKL(mp),
	    (int)MBLKL(datamp),
	    nxge_dump_packet((char *)evh, 12)));

	/*
	 * Start to process.
	 */
	available = pktlen - hdrlen;
	/* segnum = ceil(available / mss); at least 2 for a true LSO packet */
	segnum = (available - 1) / mss + 1;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: pktlen %d "
	    "MBLKL(mp): %d, MBLKL(datamp): %d "
	    "available %d mss %d segnum %d",
	    pktlen, (int)MBLKL(mp), (int)MBLKL(datamp),
	    available,
	    mss,
	    segnum));

	VERIFY(segnum >= 2);

	/*
	 * Try to pre-allocate all header messages.
	 * The chain is built by prepending (LIFO), so its order is the
	 * reverse of allocation order - harmless, since every header is
	 * still blank and is filled in afterwards.
	 */
	mp_chain = NULL;
	for (i = 0; i < segnum; i++) {
		if ((nmp = allocb(hdrlen, 0)) == NULL) {
			/* Clean up the mp_chain */
			while (mp_chain != NULL) {
				nmp = mp_chain;
				mp_chain = mp_chain->b_next;
				freemsg(nmp);
			}
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "<== nxge_do_softlso: "
			    "Could not allocate enough messages for headers!"));
			freemsg(mp);
			return (NULL);
		}
		nmp->b_next = mp_chain;
		mp_chain = nmp;

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: "
		    "mp $%p nmp $%p mp_chain $%p mp_chain->b_next $%p",
		    mp, nmp, mp_chain, mp_chain->b_next));
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: mp $%p nmp $%p mp_chain $%p",
	    mp, nmp, mp_chain));

	/*
	 * Associate payload with new packets.
	 * Each header mblk gets up to 'mss' bytes of payload hung off its
	 * b_cont via dupb() - the data blocks are shared with the original
	 * message rather than copied (zero-copy segmentation).
	 */
	cmp = mp_chain;
	left = available;
	while (cmp != NULL) {
		nmp = dupb(datamp);
		if (nmp == NULL) {
			do_cleanup = B_TRUE;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==>nxge_do_softlso: "
			    "Can not dupb(datamp), have to do clean up"));
			goto cleanup_allocated_msgs;
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: (loop) before mp $%p cmp $%p "
		    "dupb nmp $%p len %d left %d msd %d ",
		    mp, cmp, nmp, len, left, mss));

		cmp->b_cont = nmp;
		nmp->b_rptr = rptr;
		/* This segment carries min(left, mss) bytes of payload. */
		len = (left < mss) ? left : mss;
		left -= len;

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: (loop) after mp $%p cmp $%p "
		    "dupb nmp $%p len %d left %d mss %d ",
		    mp, cmp, nmp, len, left, mss));
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "nxge_do_softlso: before available: %d, "
		    "left: %d, len: %d, segnum: %d MBLK(nmp): %d",
		    available, left, len, segnum, (int)MBLKL(nmp)));

		len -= MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "nxge_do_softlso: after available: %d, "
		    "left: %d, len: %d, segnum: %d MBLK(nmp): %d",
		    available, left, len, segnum, (int)MBLKL(nmp)));

		/*
		 * The segment still needs 'len' more bytes: keep pulling
		 * data from the original message's continuation blocks.
		 */
		while (len > 0) {
			mblk_t *mmp = NULL;

			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "nxge_do_softlso: (4) len > 0 available: %d, "
			    "left: %d, len: %d, segnum: %d MBLK(nmp): %d",
			    available, left, len, segnum, (int)MBLKL(nmp)));

			if (datamp->b_cont != NULL) {
				datamp = datamp->b_cont;
				rptr = datamp->b_rptr;
				mmp = dupb(datamp);
				if (mmp == NULL) {
					do_cleanup = B_TRUE;
					NXGE_DEBUG_MSG((NULL, TX_CTL,
					    "==> nxge_do_softlso: "
					    "Can not dupb(datamp) (1), :"
					    "have to do clean up"));
					NXGE_DEBUG_MSG((NULL, TX_CTL,
					    "==> nxge_do_softlso: "
					    "available: %d, left: %d, "
					    "len: %d, MBLKL(nmp): %d",
					    available, left, len,
					    (int)MBLKL(nmp)));
					goto cleanup_allocated_msgs;
				}
			} else {
				/*
				 * We ran out of source data although the
				 * byte accounting says more is needed -
				 * this can only mean corrupted pointers.
				 */
				NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
				    "==> nxge_do_softlso: "
				    "(1)available: %d, left: %d, "
				    "len: %d, MBLKL(nmp): %d",
				    available, left, len,
				    (int)MBLKL(nmp)));
				cmn_err(CE_PANIC,
				    "==> nxge_do_softlso: "
				    "Pointers must have been corrupted!\n"
				    "datamp: $%p, nmp: $%p, rptr: $%p",
				    (void *)datamp,
				    (void *)nmp,
				    (void *)rptr);
			}
			nmp->b_cont = mmp;
			nmp = mmp;
			len -= MBLKL(nmp);
		}
		if (len < 0) {
			/*
			 * The last dup'ed block overshot: trim its write
			 * pointer back so this segment holds exactly its
			 * share, and remember where the next one starts.
			 */
			nmp->b_wptr += len;
			rptr = nmp->b_wptr;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "(5) len < 0 (less than 0)"
			    "available: %d, left: %d, len: %d, MBLKL(nmp): %d",
			    available, left, len, (int)MBLKL(nmp)));

		} else if (len == 0) {
			if (datamp->b_cont != NULL) {
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "(5) len == 0"
				    "available: %d, left: %d, len: %d, "
				    "MBLKL(nmp): %d",
				    available, left, len, (int)MBLKL(nmp)));
				datamp = datamp->b_cont;
				rptr = datamp->b_rptr;
			} else {
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "(6)available b_cont == NULL : %d, "
				    "left: %d, len: %d, MBLKL(nmp): %d",
				    available, left, len, (int)MBLKL(nmp)));

				VERIFY(cmp->b_next == NULL);
				VERIFY(left == 0);
				break; /* Done! */
			}
		}
		cmp = cmp->b_next;

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "(7) do_softlso: "
		    "next mp in mp_chain available len != 0 : %d, "
		    "left: %d, len: %d, MBLKL(nmp): %d",
		    available, left, len, (int)MBLKL(nmp)));
	}

	/*
	 * From now, start to fill up all headers for the first message
	 * Hardware checksum flags need to be updated separately for FULLCKSUM
	 * and PARTIALCKSUM cases. For full checksum, copying the original
	 * flags into every new packet is enough. But for HCK_PARTIALCKSUM, all
	 * required fields need to be updated properly.
	 */
	nmp = mp_chain;
	bcopy(mp->b_rptr, nmp->b_rptr, hdrlen);
	nmp->b_wptr = nmp->b_rptr + hdrlen;
	niph = (struct ip *)(nmp->b_rptr + ehlen);
	niph->ip_len = htons(mss + iphlen + tcphlen);
	/* First segment keeps the original IP ID; later ones increment it. */
	ip_id = ntohs(niph->ip_id);
	ntcph = (struct tcphdr *)(nmp->b_rptr + ehlen + iphlen);
#ifdef __sparc
	/* th_seq may be only 16-bit aligned; avoid a misaligned 32-bit load */
	bcopy((char *)&ntcph->th_seq, &tcp_seq_tmp, 4);
	tcp_seq = ntohl(tcp_seq_tmp);
#else
	tcp_seq = ntohl(ntcph->th_seq);
#endif

	/* FIN/PSH (and RST) belong only on the last segment. */
	ntcph->th_flags &= ~(TH_FIN | TH_PUSH | TH_RST);

	DB_CKSUMFLAGS(nmp) = (uint16_t)hckflags;
	DB_CKSUMSTART(nmp) = start_offset;
	DB_CKSUMSTUFF(nmp) = stuff_offset;

	/* calculate IP checksum and TCP pseudo header checksum */
	niph->ip_sum = 0;
	niph->ip_sum = (uint16_t)nxge_csgen((uint16_t *)niph, iphlen);

	l4_len = mss + tcphlen;
	sum = htons(l4_len) + l4cksum;
	sum = (sum & 0xFFFF) + (sum >> 16);
	ntcph->th_sum = (sum & 0xffff);

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: first mp $%p (mp_chain $%p) "
	    "mss %d pktlen %d l4_len %d (0x%x) "
	    "MBLKL(mp): %d, MBLKL(datamp): %d "
	    "ip_sum 0x%x "
	    "th_sum 0x%x sum 0x%x ) "
	    "dump first ip->tcp %s",
	    nmp, mp_chain,
	    mss,
	    pktlen,
	    l4_len,
	    l4_len,
	    (int)MBLKL(mp), (int)MBLKL(datamp),
	    niph->ip_sum,
	    ntcph->th_sum,
	    sum,
	    nxge_dump_packet((char *)niph, 52)));

	/*
	 * Middle segments: 'cmp' stays pinned on the first (template)
	 * header, which is copied into each subsequent header and then
	 * patched (IP ID, sequence number, checksums).  All middle
	 * segments carry exactly 'mss' bytes, so the pseudo-header sum
	 * computed above for the first segment is reused unchanged.
	 */
	cmp = nmp;
	while ((nmp = nmp->b_next)->b_next != NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==>nxge_do_softlso: middle l4_len %d ", l4_len));
		bcopy(cmp->b_rptr, nmp->b_rptr, hdrlen);
		nmp->b_wptr = nmp->b_rptr + hdrlen;
		niph = (struct ip *)(nmp->b_rptr + ehlen);
		niph->ip_id = htons(++ip_id);
		niph->ip_len = htons(mss + iphlen + tcphlen);
		ntcph = (struct tcphdr *)(nmp->b_rptr + ehlen + iphlen);
		tcp_seq += mss;

		ntcph->th_flags &= ~(TH_FIN | TH_PUSH | TH_RST | TH_URG);

#ifdef __sparc
		tcp_seq_tmp = htonl(tcp_seq);
		bcopy(&tcp_seq_tmp, (char *)&ntcph->th_seq, 4);
#else
		ntcph->th_seq = htonl(tcp_seq);
#endif
		DB_CKSUMFLAGS(nmp) = (uint16_t)hckflags;
		DB_CKSUMSTART(nmp) = start_offset;
		DB_CKSUMSTUFF(nmp) = stuff_offset;

		/* calculate IP checksum and TCP pseudo header checksum */
		niph->ip_sum = 0;
		niph->ip_sum = (uint16_t)nxge_csgen((uint16_t *)niph, iphlen);
		ntcph->th_sum = (sum & 0xffff);

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: middle ip_sum 0x%x "
		    "th_sum 0x%x "
		    " mp $%p (mp_chain $%p) pktlen %d "
		    "MBLKL(mp): %d, MBLKL(datamp): %d ",
		    niph->ip_sum,
		    ntcph->th_sum,
		    nmp, mp_chain,
		    pktlen, (int)MBLKL(mp), (int)MBLKL(datamp)));
	}

	/* Last segment */
	/*
	 * Set FIN and/or PSH flags if present only in the last packet.
	 * The ip_len could be different from prior packets.
	 */
	bcopy(cmp->b_rptr, nmp->b_rptr, hdrlen);
	nmp->b_wptr = nmp->b_rptr + hdrlen;
	niph = (struct ip *)(nmp->b_rptr + ehlen);
	niph->ip_id = htons(++ip_id);
	niph->ip_len = htons(msgsize(nmp->b_cont) + iphlen + tcphlen);
	ntcph = (struct tcphdr *)(nmp->b_rptr + ehlen + iphlen);
	tcp_seq += mss;
#ifdef __sparc
	tcp_seq_tmp = htonl(tcp_seq);
	bcopy(&tcp_seq_tmp, (char *)&ntcph->th_seq, 4);
#else
	ntcph->th_seq = htonl(tcp_seq);
#endif
	/* 'otcph' still points into the original message's TCP header. */
	ntcph->th_flags = (otcph->th_flags & ~TH_URG);

	DB_CKSUMFLAGS(nmp) = (uint16_t)hckflags;
	DB_CKSUMSTART(nmp) = start_offset;
	DB_CKSUMSTUFF(nmp) = stuff_offset;

	/* calculate IP checksum and TCP pseudo header checksum */
	niph->ip_sum = 0;
	niph->ip_sum = (uint16_t)nxge_csgen((uint16_t *)niph, iphlen);

	/* The last segment's payload may be shorter than mss. */
	l4_len = ntohs(niph->ip_len) - iphlen;
	sum = htons(l4_len) + l4cksum;
	sum = (sum & 0xFFFF) + (sum >> 16);
	ntcph->th_sum = (sum & 0xffff);

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: last next "
	    "niph->ip_sum 0x%x "
	    "ntcph->th_sum 0x%x sum 0x%x "
	    "dump last ip->tcp %s "
	    "cmp $%p mp $%p (mp_chain $%p) pktlen %d (0x%x) "
	    "l4_len %d (0x%x) "
	    "MBLKL(mp): %d, MBLKL(datamp): %d ",
	    niph->ip_sum,
	    ntcph->th_sum, sum,
	    nxge_dump_packet((char *)niph, 52),
	    cmp, nmp, mp_chain,
	    pktlen, pktlen,
	    l4_len,
	    l4_len,
	    (int)MBLKL(mp), (int)MBLKL(datamp)));

cleanup_allocated_msgs:
	if (do_cleanup) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: "
		    "Failed allocating messages, "
		    "have to clean up and fail!"));
		while (mp_chain != NULL) {
			nmp = mp_chain;
			mp_chain = mp_chain->b_next;
			freemsg(nmp);
		}
	}
	/*
	 * We're done here, so just free the original message and return the
	 * new message chain, that could be NULL if failed, back to the caller.
	 */
	freemsg(mp);

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_do_softlso:mp_chain $%p", mp_chain));
	return (mp_chain);
}
1745 1745
1746 1746 /*
1747 1747 * Will be called before NIC driver do further operation on the message.
1748 1748 * The input message may include LSO information, if so, go to softlso logic
1749 1749 * to eliminate the oversized LSO packet for the incapable underlying h/w.
1750 1750 * The return could be the same non-LSO message or a message chain for LSO case.
1751 1751 *
1752 1752 * The driver needs to call this function per packet and process the whole chain
1753 1753 * if applied.
1754 1754 */
1755 1755 static mblk_t *
1756 1756 nxge_lso_eliminate(mblk_t *mp)
1757 1757 {
1758 1758 uint32_t lsoflags;
1759 1759 uint32_t mss;
1760 1760
1761 1761 NXGE_DEBUG_MSG((NULL, TX_CTL,
1762 1762 "==>nxge_lso_eliminate:"));
1763 1763 nxge_lso_info_get(mp, &mss, &lsoflags);
1764 1764
1765 1765 if (lsoflags & HW_LSO) {
1766 1766 mblk_t *nmp;
1767 1767
1768 1768 NXGE_DEBUG_MSG((NULL, TX_CTL,
1769 1769 "==>nxge_lso_eliminate:"
1770 1770 "HW_LSO:mss %d mp $%p",
1771 1771 mss, mp));
1772 1772 if ((nmp = nxge_do_softlso(mp, mss)) != NULL) {
1773 1773 NXGE_DEBUG_MSG((NULL, TX_CTL,
1774 1774 "<== nxge_lso_eliminate: "
1775 1775 "LSO: nmp not NULL nmp $%p mss %d mp $%p",
1776 1776 nmp, mss, mp));
1777 1777 return (nmp);
1778 1778 } else {
1779 1779 NXGE_DEBUG_MSG((NULL, TX_CTL,
1780 1780 "<== nxge_lso_eliminate_ "
1781 1781 "LSO: failed nmp NULL nmp $%p mss %d mp $%p",
1782 1782 nmp, mss, mp));
1783 1783 return (NULL);
1784 1784 }
1785 1785 }
1786 1786
1787 1787 NXGE_DEBUG_MSG((NULL, TX_CTL,
1788 1788 "<== nxge_lso_eliminate"));
1789 1789 return (mp);
1790 1790 }
1791 1791
/*
 * One's-complement checksum (RFC 1071 style) over 'len' bytes at 'adr',
 * returned as the complemented 16-bit sum.  For an odd 'len' the final
 * partial word contributes only its 0xff00-masked half.  Used here to
 * generate IP header checksums for the soft-LSO segments.
 */
static uint32_t
nxge_csgen(uint16_t *adr, int len)
{
	uint32_t acc = 0;
	uint32_t carry;
	int nwords = len / 2;
	int w;

	for (w = 0; w < nwords; w++) {
		acc += (uint32_t)(adr[w] & 0xffff);
	}

	if ((len & 1) != 0) {
		acc += (uint32_t)(adr[nwords] & 0xff00);
	}

	/* Fold any carries back into the low 16 bits. */
	while ((carry = (acc >> 16) & 0xffff) != 0) {
		acc = (acc & 0xffff) + carry;
	}

	return (~acc & 0xffff);
}
↓ open down ↓ |
1424 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX