5042 stop using deprecated atomic functions
--- old/usr/src/uts/common/inet/tcp/tcp_input.c
+++ new/usr/src/uts/common/inet/tcp/tcp_input.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2011 Joyent, Inc. All rights reserved.
26 26 */
27 27
28 28 /* This file contains all TCP input processing functions. */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/stream.h>
32 32 #include <sys/strsun.h>
33 33 #include <sys/strsubr.h>
34 34 #include <sys/stropts.h>
35 35 #include <sys/strlog.h>
36 36 #define _SUN_TPI_VERSION 2
37 37 #include <sys/tihdr.h>
38 38 #include <sys/suntpi.h>
39 39 #include <sys/xti_inet.h>
40 40 #include <sys/squeue_impl.h>
41 41 #include <sys/squeue.h>
42 42 #include <sys/tsol/tnet.h>
43 43
44 44 #include <inet/common.h>
45 45 #include <inet/ip.h>
46 46 #include <inet/tcp.h>
47 47 #include <inet/tcp_impl.h>
48 48 #include <inet/tcp_cluster.h>
49 49 #include <inet/proto_set.h>
50 50 #include <inet/ipsec_impl.h>
51 51
52 52 /*
53 53 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing
54 54 */
55 55
56 56 #ifdef _BIG_ENDIAN
57 57 #define TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
58 58 (TCPOPT_TSTAMP << 8) | 10)
59 59 #else
60 60 #define TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
61 61 (TCPOPT_NOP << 8) | TCPOPT_NOP)
62 62 #endif
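
On either byte order the constant encodes the wire sequence 0x01 0x01 0x08 0x0a
(NOP, NOP, TCPOPT_TSTAMP, length 10), so a receiver can recognize a well-formed,
aligned timestamp option with a single 32-bit compare. A minimal sketch of that
fast path, mirroring the test tcp_paws_check() performs later in this file:

        uint8_t *up = (uint8_t *)tcpha + TCP_MIN_HEADER_LENGTH;
        uint32_t ts_val, ts_ecr;

        /* One aligned word compare recognizes NOP, NOP, TSTAMP, len 10. */
        if (OK_32PTR(up) && *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
                ts_val = ABE32_TO_U32(up + 4);  /* TSval */
                ts_ecr = ABE32_TO_U32(up + 8);  /* TSecr */
        }
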
63 63
64 64 /*
65 65 * Flags returned from tcp_parse_options.
66 66 */
67 67 #define TCP_OPT_MSS_PRESENT 1
68 68 #define TCP_OPT_WSCALE_PRESENT 2
69 69 #define TCP_OPT_TSTAMP_PRESENT 4
70 70 #define TCP_OPT_SACK_OK_PRESENT 8
71 71 #define TCP_OPT_SACK_PRESENT 16
72 72
73 73 /*
74 74 * PAWS needs a timer for 24 days. This is the number of ticks in 24 days
75 75 */
76 76 #define PAWS_TIMEOUT ((clock_t)(24*24*60*60*hz))
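
The 24-day figure approximates the time a 31-bit timestamp takes to wrap at the
fastest clock rate RFC 1323 recommends (one tick per millisecond). Checking the
arithmetic: 24 (days) * 24 (hours) * 60 * 60 = 2,073,600 seconds, so with the
common hz value of 100 the macro evaluates to 207,360,000 ticks.
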
77 77
78 78 /*
79 79 * Since tcp_listener is not cleared atomically with tcp_detached
80 80 * being cleared we need this extra bit to tell a detached connection
81 81 * apart from one that is in the process of being accepted.
82 82 */
83 83 #define TCP_IS_DETACHED_NONEAGER(tcp) \
84 84 (TCP_IS_DETACHED(tcp) && \
85 85 (!(tcp)->tcp_hard_binding))
86 86
87 87 /*
88 88 * Steps to do when a tcp_t moves to TIME-WAIT state.
89 89 *
90 90 * This connection is done, we don't need to account for it. Decrement
91 91 * the listener connection counter if needed.
92 92 *
93 93 * Decrement the connection counter of the stack. Note that this counter
94 94 * is per CPU. So the total number of connections in a stack is the sum of all
95 95 * of them. Since there is no lock for handling all of them exclusively, the
96 96 * resulting sum is only an approximation.
97 97 *
98 98 * Unconditionally clear the exclusive binding bit so this TIME-WAIT
99 99 * connection won't interfere with new ones.
100 100 *
101 101 * Start the TIME-WAIT timer. If upper layer has not closed the connection,
102 102 * the timer is handled within the context of this tcp_t. When the timer
103 103 * fires, tcp_clean_death() is called. If upper layer closes the connection
104 104 * during this period, tcp_time_wait_append() will be called to add this
105 105 * tcp_t to the global TIME-WAIT list. Note that this means that the
106 106 * actual wait time in TIME-WAIT state will be longer than the
107 107 * tcps_time_wait_interval since the period before upper layer closes the
108 108 * connection is not accounted for when tcp_time_wait_append() is called.
109 109 *
110 110 * If the upper layer has closed the connection, call tcp_time_wait_append()
111 111 * directly.
112 112 *
113 113 */
114 114 #define SET_TIME_WAIT(tcps, tcp, connp) \
115 115 { \
116 116 (tcp)->tcp_state = TCPS_TIME_WAIT; \
117 117 if ((tcp)->tcp_listen_cnt != NULL) \
118 118 TCP_DECR_LISTEN_CNT(tcp); \
119 119 atomic_dec_64( \
120 120 (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt); \
121 121 (connp)->conn_exclbind = 0; \
122 122 if (!TCP_IS_DETACHED(tcp)) { \
123 123 TCP_TIMER_RESTART(tcp, (tcps)->tcps_time_wait_interval); \
124 124 } else { \
125 125 tcp_time_wait_append(tcp); \
126 126 TCP_DBGSTAT(tcps, tcp_rput_time_wait); \
127 127 } \
128 128 }
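
The per-CPU decrement above is why the stack-wide connection count is only
approximate, as the block comment notes. A sketch of how such a counter would be
totalled, assuming the per-CPU array length is kept in tcps_sc_cnt (a name used
elsewhere in the TCP stack; this helper is illustrative, not part of the change):

        static uint64_t
        tcps_conn_cnt_approx(tcp_stack_t *tcps)
        {
                uint64_t sum = 0;
                int i;

                /* No lock covers all slots at once, so the sum can race. */
                for (i = 0; i < tcps->tcps_sc_cnt; i++)
                        sum += tcps->tcps_sc[i]->tcp_sc_conn_cnt;
                return (sum);
        }
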
129 129
130 130 /*
131 131 * If tcp_drop_ack_unsent_cnt is greater than 0, then when TCP receives
132 132 * more than tcp_drop_ack_unsent_cnt ACKs which acknowledge unsent
133 133 * data, TCP will not respond with an ACK.  RFC 793 requires that
134 134 * TCP respond with an ACK for such a bogus ACK.  By not following
135 135 * the RFC, we prevent TCP from getting into an ACK storm if somehow
136 136 * an attacker successfully spoofs an acceptable segment to our
137 137 * peer, or when our peer is "confused."
138 138 */
139 139 static uint32_t tcp_drop_ack_unsent_cnt = 10;
140 140
141 141 /*
142 142 * To protect TCP against an attacker using a small window and requesting
143 143 * a large amount of data (a DoS attack by consuming memory), TCP checks the
144 144 * window advertised in the last ACK of the 3-way handshake. TCP uses
145 145 * the tcp_mss (the size of one packet) value for comparison. The window
146 146 * should be larger than tcp_mss. But while a sane TCP should advertise
147 147 * a receive window larger than or equal to 4*MSS to avoid stop-and-go
148 148 * traffic, not all TCP stacks do that. This is especially true when
149 149 * tcp_mss is a big value.
150 150 *
151 151 * To work around this issue, an additional fixed value for comparison
152 152 * is also used. If the advertised window is smaller than both tcp_mss
153 153 * and tcp_init_wnd_chk, the ACK is considered invalid. So for a large
154 154 * tcp_mss value (say, 8K), a window larger than tcp_init_wnd_chk but
155 155 * smaller than 8K is considered to be OK.
156 156 */
157 157 static uint32_t tcp_init_wnd_chk = 4096;
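
A sketch of the acceptance test the comment above describes (the actual check
lives in the handshake path elsewhere in this file): the advertised window is
rejected only when it is below both limits, i.e. below the smaller of the two.

        /* seg_win: window advertised in the last ACK of the handshake. */
        if (seg_win < MIN(tcp->tcp_mss, tcp_init_wnd_chk)) {
                /* Window too small on both counts; treat the ACK as bogus. */
        }
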
158 158
159 159 /* Process ICMP source quench message or not. */
160 160 static boolean_t tcp_icmp_source_quench = B_FALSE;
161 161
162 162 static boolean_t tcp_outbound_squeue_switch = B_FALSE;
163 163
164 164 static mblk_t *tcp_conn_create_v4(conn_t *, conn_t *, mblk_t *,
165 165 ip_recv_attr_t *);
166 166 static mblk_t *tcp_conn_create_v6(conn_t *, conn_t *, mblk_t *,
167 167 ip_recv_attr_t *);
168 168 static boolean_t tcp_drop_q0(tcp_t *);
169 169 static void tcp_icmp_error_ipv6(tcp_t *, mblk_t *, ip_recv_attr_t *);
170 170 static mblk_t *tcp_input_add_ancillary(tcp_t *, mblk_t *, ip_pkt_t *,
171 171 ip_recv_attr_t *);
172 172 static void tcp_input_listener(void *, mblk_t *, void *, ip_recv_attr_t *);
173 173 static int tcp_parse_options(tcpha_t *, tcp_opt_t *);
174 174 static void tcp_process_options(tcp_t *, tcpha_t *);
175 175 static mblk_t *tcp_reass(tcp_t *, mblk_t *, uint32_t);
176 176 static void tcp_reass_elim_overlap(tcp_t *, mblk_t *);
177 177 static void tcp_rsrv_input(void *, mblk_t *, void *, ip_recv_attr_t *);
178 178 static void tcp_set_rto(tcp_t *, time_t);
179 179 static void tcp_setcred_data(mblk_t *, ip_recv_attr_t *);
180 180
181 181 /*
182 182 * Set the MSS associated with a particular tcp based on its current value,
183 183 * and a new one passed in. Observe minimums and maximums, and reset other
184 184 * state variables that we want to view as multiples of MSS.
185 185 *
186 186 * The value of MSS could be either increased or decreased.
187 187 */
188 188 void
189 189 tcp_mss_set(tcp_t *tcp, uint32_t mss)
190 190 {
191 191 uint32_t mss_max;
192 192 tcp_stack_t *tcps = tcp->tcp_tcps;
193 193 conn_t *connp = tcp->tcp_connp;
194 194
195 195 if (connp->conn_ipversion == IPV4_VERSION)
196 196 mss_max = tcps->tcps_mss_max_ipv4;
197 197 else
198 198 mss_max = tcps->tcps_mss_max_ipv6;
199 199
200 200 if (mss < tcps->tcps_mss_min)
201 201 mss = tcps->tcps_mss_min;
202 202 if (mss > mss_max)
203 203 mss = mss_max;
204 204 /*
205 205 * Unless naglim has been set by our client to
206 206 * a non-mss value, force naglim to track mss.
207 207 * This can help to aggregate small writes.
208 208 */
209 209 if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim)
210 210 tcp->tcp_naglim = mss;
211 211 /*
212 212 * TCP should be able to buffer at least 4 MSS of data for obvious
213 213 * performance reasons.
214 214 */
215 215 if ((mss << 2) > connp->conn_sndbuf)
216 216 connp->conn_sndbuf = mss << 2;
217 217
218 218 /*
219 219 * Set the send lowater to at least twice the MSS.
220 220 */
221 221 if ((mss << 1) > connp->conn_sndlowat)
222 222 connp->conn_sndlowat = mss << 1;
223 223
224 224 /*
225 225 * Update tcp_cwnd according to the new value of MSS. Keep the
226 226 * previous ratio to preserve the transmit rate.
227 227 */
228 228 tcp->tcp_cwnd = (tcp->tcp_cwnd / tcp->tcp_mss) * mss;
229 229 tcp->tcp_cwnd_cnt = 0;
230 230
231 231 tcp->tcp_mss = mss;
232 232 (void) tcp_maxpsz_set(tcp, B_TRUE);
233 233 }
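
A worked example of the cwnd rescaling above: with tcp_mss at 1460 and tcp_cwnd
at 5840 bytes (four segments), a new mss of 536 gives (5840 / 1460) * 536 = 2144
bytes, still four segments' worth, so the transmit rate measured in segments is
preserved.
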
234 234
235 235 /*
236 236 * Extract option values from a tcp header. We put any found values into the
237 237 * tcpopt struct and return a bitmask saying which options were found.
238 238 */
239 239 static int
240 240 tcp_parse_options(tcpha_t *tcpha, tcp_opt_t *tcpopt)
241 241 {
242 242 uchar_t *endp;
243 243 int len;
244 244 uint32_t mss;
245 245 uchar_t *up = (uchar_t *)tcpha;
246 246 int found = 0;
247 247 int32_t sack_len;
248 248 tcp_seq sack_begin, sack_end;
249 249 tcp_t *tcp;
250 250
251 251 endp = up + TCP_HDR_LENGTH(tcpha);
252 252 up += TCP_MIN_HEADER_LENGTH;
253 253 while (up < endp) {
254 254 len = endp - up;
255 255 switch (*up) {
256 256 case TCPOPT_EOL:
257 257 break;
258 258
259 259 case TCPOPT_NOP:
260 260 up++;
261 261 continue;
262 262
263 263 case TCPOPT_MAXSEG:
264 264 if (len < TCPOPT_MAXSEG_LEN ||
265 265 up[1] != TCPOPT_MAXSEG_LEN)
266 266 break;
267 267
268 268 mss = BE16_TO_U16(up+2);
269 269 /* Caller must handle tcp_mss_min and tcp_mss_max_* */
270 270 tcpopt->tcp_opt_mss = mss;
271 271 found |= TCP_OPT_MSS_PRESENT;
272 272
273 273 up += TCPOPT_MAXSEG_LEN;
274 274 continue;
275 275
276 276 case TCPOPT_WSCALE:
277 277 if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN)
278 278 break;
279 279
280 280 if (up[2] > TCP_MAX_WINSHIFT)
281 281 tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT;
282 282 else
283 283 tcpopt->tcp_opt_wscale = up[2];
284 284 found |= TCP_OPT_WSCALE_PRESENT;
285 285
286 286 up += TCPOPT_WS_LEN;
287 287 continue;
288 288
289 289 case TCPOPT_SACK_PERMITTED:
290 290 if (len < TCPOPT_SACK_OK_LEN ||
291 291 up[1] != TCPOPT_SACK_OK_LEN)
292 292 break;
293 293 found |= TCP_OPT_SACK_OK_PRESENT;
294 294 up += TCPOPT_SACK_OK_LEN;
295 295 continue;
296 296
297 297 case TCPOPT_SACK:
298 298 if (len <= 2 || up[1] <= 2 || len < up[1])
299 299 break;
300 300
301 301 /* If TCP is not interested in SACK blks... */
302 302 if ((tcp = tcpopt->tcp) == NULL) {
303 303 up += up[1];
304 304 continue;
305 305 }
306 306 sack_len = up[1] - TCPOPT_HEADER_LEN;
307 307 up += TCPOPT_HEADER_LEN;
308 308
309 309 /*
310 310 * If the list is empty, allocate one and assume
311 311 * nothing is sack'ed.
312 312 */
313 313 if (tcp->tcp_notsack_list == NULL) {
314 314 tcp_notsack_update(&(tcp->tcp_notsack_list),
315 315 tcp->tcp_suna, tcp->tcp_snxt,
316 316 &(tcp->tcp_num_notsack_blk),
317 317 &(tcp->tcp_cnt_notsack_list));
318 318
319 319 /*
320 320 * Make sure tcp_notsack_list is not NULL.
321 321 * This happens when kmem_alloc(KM_NOSLEEP)
322 322 * returns NULL.
323 323 */
324 324 if (tcp->tcp_notsack_list == NULL) {
325 325 up += sack_len;
326 326 continue;
327 327 }
328 328 tcp->tcp_fack = tcp->tcp_suna;
329 329 }
330 330
331 331 while (sack_len > 0) {
332 332 if (up + 8 > endp) {
333 333 up = endp;
334 334 break;
335 335 }
336 336 sack_begin = BE32_TO_U32(up);
337 337 up += 4;
338 338 sack_end = BE32_TO_U32(up);
339 339 up += 4;
340 340 sack_len -= 8;
341 341 /*
342 342 * Bounds checking. Make sure the SACK
343 343 * info is within tcp_suna and tcp_snxt.
344 344 * If this SACK blk is out of bounds, ignore
345 345 * it but continue to parse the following
346 346 * blks.
347 347 */
348 348 if (SEQ_LEQ(sack_end, sack_begin) ||
349 349 SEQ_LT(sack_begin, tcp->tcp_suna) ||
350 350 SEQ_GT(sack_end, tcp->tcp_snxt)) {
351 351 continue;
352 352 }
353 353 tcp_notsack_insert(&(tcp->tcp_notsack_list),
354 354 sack_begin, sack_end,
355 355 &(tcp->tcp_num_notsack_blk),
356 356 &(tcp->tcp_cnt_notsack_list));
357 357 if (SEQ_GT(sack_end, tcp->tcp_fack)) {
358 358 tcp->tcp_fack = sack_end;
359 359 }
360 360 }
361 361 found |= TCP_OPT_SACK_PRESENT;
362 362 continue;
363 363
364 364 case TCPOPT_TSTAMP:
365 365 if (len < TCPOPT_TSTAMP_LEN ||
366 366 up[1] != TCPOPT_TSTAMP_LEN)
367 367 break;
368 368
369 369 tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2);
370 370 tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6);
371 371
372 372 found |= TCP_OPT_TSTAMP_PRESENT;
373 373
374 374 up += TCPOPT_TSTAMP_LEN;
375 375 continue;
376 376
377 377 default:
378 378 if (len <= 1 || len < (int)up[1] || up[1] == 0)
379 379 break;
380 380 up += up[1];
381 381 continue;
382 382 }
383 383 break;
384 384 }
385 385 return (found);
386 386 }
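
A minimal caller sketch, matching how tcp_process_options() below drives this
routine; tcpopt.tcp is set to a tcp_t only when SACK blocks should be folded
into its notsack list, and NULL otherwise:

        tcp_opt_t tcpopt;
        uint32_t mss;
        int options;

        tcpopt.tcp = NULL;      /* not interested in SACK blocks */
        options = tcp_parse_options(tcpha, &tcpopt);
        if (options & TCP_OPT_MSS_PRESENT) {
                /* Caller clamps to tcps_mss_min/tcps_mss_max_*. */
                mss = tcpopt.tcp_opt_mss;
        }
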
387 387
388 388 /*
389 389 * Process all TCP options in the SYN segment. Note that this function should
390 390 * be called after tcp_set_destination() is called so that the necessary info
391 391 * from IRE is already set in the tcp structure.
392 392 *
393 393 * This function sets up the correct tcp_mss value according to the
394 394 * MSS option value and our header size. It also sets up the window scale
395 395 * and timestamp values, and initializes SACK info blocks. But it does not
396 396 * change the receive window size after setting the tcp_mss value. The caller
397 397 * should do the appropriate change.
398 398 */
399 399 static void
400 400 tcp_process_options(tcp_t *tcp, tcpha_t *tcpha)
401 401 {
402 402 int options;
403 403 tcp_opt_t tcpopt;
404 404 uint32_t mss_max;
405 405 char *tmp_tcph;
406 406 tcp_stack_t *tcps = tcp->tcp_tcps;
407 407 conn_t *connp = tcp->tcp_connp;
408 408
409 409 tcpopt.tcp = NULL;
410 410 options = tcp_parse_options(tcpha, &tcpopt);
411 411
412 412 /*
413 413 * Process MSS option. Note that MSS option value does not account
414 414 * for IP or TCP options. This means that it is equal to MTU - minimum
415 415 * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for
416 416 * IPv6.
417 417 */
418 418 if (!(options & TCP_OPT_MSS_PRESENT)) {
419 419 if (connp->conn_ipversion == IPV4_VERSION)
420 420 tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4;
421 421 else
422 422 tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6;
423 423 } else {
424 424 if (connp->conn_ipversion == IPV4_VERSION)
425 425 mss_max = tcps->tcps_mss_max_ipv4;
426 426 else
427 427 mss_max = tcps->tcps_mss_max_ipv6;
428 428 if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min)
429 429 tcpopt.tcp_opt_mss = tcps->tcps_mss_min;
430 430 else if (tcpopt.tcp_opt_mss > mss_max)
431 431 tcpopt.tcp_opt_mss = mss_max;
432 432 }
433 433
434 434 /* Process Window Scale option. */
435 435 if (options & TCP_OPT_WSCALE_PRESENT) {
436 436 tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale;
437 437 tcp->tcp_snd_ws_ok = B_TRUE;
438 438 } else {
439 439 tcp->tcp_snd_ws = B_FALSE;
440 440 tcp->tcp_snd_ws_ok = B_FALSE;
441 441 tcp->tcp_rcv_ws = B_FALSE;
442 442 }
443 443
444 444 /* Process Timestamp option. */
445 445 if ((options & TCP_OPT_TSTAMP_PRESENT) &&
446 446 (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) {
447 447 tmp_tcph = (char *)tcp->tcp_tcpha;
448 448
449 449 tcp->tcp_snd_ts_ok = B_TRUE;
450 450 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
451 451 tcp->tcp_last_rcv_lbolt = ddi_get_lbolt64();
452 452 ASSERT(OK_32PTR(tmp_tcph));
453 453 ASSERT(connp->conn_ht_ulp_len == TCP_MIN_HEADER_LENGTH);
454 454
455 455 /* Fill in our template header with basic timestamp option. */
456 456 tmp_tcph += connp->conn_ht_ulp_len;
457 457 tmp_tcph[0] = TCPOPT_NOP;
458 458 tmp_tcph[1] = TCPOPT_NOP;
459 459 tmp_tcph[2] = TCPOPT_TSTAMP;
460 460 tmp_tcph[3] = TCPOPT_TSTAMP_LEN;
461 461 connp->conn_ht_iphc_len += TCPOPT_REAL_TS_LEN;
462 462 connp->conn_ht_ulp_len += TCPOPT_REAL_TS_LEN;
463 463 tcp->tcp_tcpha->tha_offset_and_reserved += (3 << 4);
464 464 } else {
465 465 tcp->tcp_snd_ts_ok = B_FALSE;
466 466 }
467 467
468 468 /*
469 469 * Process SACK options. If SACK is enabled for this connection,
470 470 * then allocate the SACK info structure. Note the following places
471 471 * where tcp_snd_sack_ok is set to B_TRUE.
472 472 *
473 473 * For active connection: in tcp_set_destination() called in
474 474 * tcp_connect().
475 475 *
476 476 * For passive connection: in tcp_set_destination() called in
477 477 * tcp_input_listener().
478 478 *
479 479 * That's the reason why the extra TCP_IS_DETACHED() check is there.
480 480 * That check makes sure that if we did not send a SACK OK option,
481 481 * we will not enable SACK for this connection even though the other
482 482 * side sends us SACK OK option. For active connection, the SACK
483 483 * info structure has already been allocated. So we need to free
484 484 * it if SACK is disabled.
485 485 */
486 486 if ((options & TCP_OPT_SACK_OK_PRESENT) &&
487 487 (tcp->tcp_snd_sack_ok ||
488 488 (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) {
489 489 ASSERT(tcp->tcp_num_sack_blk == 0);
490 490 ASSERT(tcp->tcp_notsack_list == NULL);
491 491
492 492 tcp->tcp_snd_sack_ok = B_TRUE;
493 493 if (tcp->tcp_snd_ts_ok) {
494 494 tcp->tcp_max_sack_blk = 3;
495 495 } else {
496 496 tcp->tcp_max_sack_blk = 4;
497 497 }
498 498 } else if (tcp->tcp_snd_sack_ok) {
499 499 /*
500 500 * Resetting tcp_snd_sack_ok to B_FALSE so that
501 501 * no SACK info will be used for this
502 502 * connection. This assumes that SACK usage
503 503 * permission is negotiated. This may need
504 504 * to be changed once this is clarified.
505 505 */
506 506 ASSERT(tcp->tcp_num_sack_blk == 0);
507 507 ASSERT(tcp->tcp_notsack_list == NULL);
508 508 tcp->tcp_snd_sack_ok = B_FALSE;
509 509 }
510 510
511 511 /*
512 512 * Now we know the exact TCP/IP header length, subtract
513 513 * that from tcp_mss to get our side's MSS.
514 514 */
515 515 tcp->tcp_mss -= connp->conn_ht_iphc_len;
516 516
517 517 /*
518 518 * Here we assume that the other side's header size will be equal to
519 519 * our header size. We calculate the real MSS accordingly. We need to
520 520 * take into account the additional overhead IPsec puts in.
521 521 *
522 522 * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header)
523 523 */
524 524 tcpopt.tcp_opt_mss -= connp->conn_ht_iphc_len +
525 525 tcp->tcp_ipsec_overhead -
526 526 ((connp->conn_ipversion == IPV4_VERSION ?
527 527 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH);
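
Concretely, for IPv4 with no IPsec: once the timestamp option has been added
above, conn_ht_iphc_len is 20 (IP) + 20 (TCP) + 12 (timestamp option) = 52
bytes, so a peer MSS option of 1460 yields 1460 - (52 + 0 - 40) = 1448, the
familiar real MSS of a timestamped IPv4 connection.
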
528 528
529 529 /*
530 530 * Set MSS to the smaller one of both ends of the connection.
531 531 * We should not have called tcp_mss_set() before, but our
532 532 * side of the MSS should have been set to a proper value
533 533 * by tcp_set_destination(). tcp_mss_set() will also set up the
534 534 * STREAM head parameters properly.
535 535 *
536 536 * If we have a larger-than-16-bit window but the other side
537 537 * didn't want to do window scale, tcp_rwnd_set() will take
538 538 * care of that.
539 539 */
540 540 tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss));
541 541
542 542 /*
543 543 * Initialize tcp_cwnd value. After tcp_mss_set(), tcp_mss has been
544 544 * updated properly.
545 545 */
546 546 TCP_SET_INIT_CWND(tcp, tcp->tcp_mss, tcps->tcps_slow_start_initial);
547 547 }
548 548
549 549 /*
550 550 * Add a new piece to the tcp reassembly queue. If the gap at the beginning
551 551 * is filled, return as much as we can. The message passed in may be
552 552 * multi-part, chained using b_cont. "start" is the starting sequence
553 553 * number for this piece.
554 554 */
555 555 static mblk_t *
556 556 tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start)
557 557 {
558 558 uint32_t end;
559 559 mblk_t *mp1;
560 560 mblk_t *mp2;
561 561 mblk_t *next_mp;
562 562 uint32_t u1;
563 563 tcp_stack_t *tcps = tcp->tcp_tcps;
564 564
565 565
566 566 /* Walk through all the new pieces. */
567 567 do {
568 568 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
569 569 (uintptr_t)INT_MAX);
570 570 end = start + (int)(mp->b_wptr - mp->b_rptr);
571 571 next_mp = mp->b_cont;
572 572 if (start == end) {
573 573 /* Empty. Blast it. */
574 574 freeb(mp);
575 575 continue;
576 576 }
577 577 mp->b_cont = NULL;
578 578 TCP_REASS_SET_SEQ(mp, start);
579 579 TCP_REASS_SET_END(mp, end);
580 580 mp1 = tcp->tcp_reass_tail;
581 581 if (!mp1) {
582 582 tcp->tcp_reass_tail = mp;
583 583 tcp->tcp_reass_head = mp;
584 584 TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
585 585 TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
586 586 end - start);
587 587 continue;
588 588 }
589 589 /* New stuff completely beyond tail? */
590 590 if (SEQ_GEQ(start, TCP_REASS_END(mp1))) {
591 591 /* Link it on end. */
592 592 mp1->b_cont = mp;
593 593 tcp->tcp_reass_tail = mp;
594 594 TCPS_BUMP_MIB(tcps, tcpInDataUnorderSegs);
595 595 TCPS_UPDATE_MIB(tcps, tcpInDataUnorderBytes,
596 596 end - start);
597 597 continue;
598 598 }
599 599 mp1 = tcp->tcp_reass_head;
600 600 u1 = TCP_REASS_SEQ(mp1);
601 601 /* New stuff at the front? */
602 602 if (SEQ_LT(start, u1)) {
603 603 /* Yes... Check for overlap. */
604 604 mp->b_cont = mp1;
605 605 tcp->tcp_reass_head = mp;
606 606 tcp_reass_elim_overlap(tcp, mp);
607 607 continue;
608 608 }
609 609 /*
610 610 * The new piece fits somewhere between the head and tail.
611 611 * We find our slot, where mp1 precedes us and mp2 trails.
612 612 */
613 613 for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) {
614 614 u1 = TCP_REASS_SEQ(mp2);
615 615 if (SEQ_LEQ(start, u1))
616 616 break;
617 617 }
618 618 /* Link ourselves in */
619 619 mp->b_cont = mp2;
620 620 mp1->b_cont = mp;
621 621
622 622 /* Trim overlap with following mblk(s) first */
623 623 tcp_reass_elim_overlap(tcp, mp);
624 624
625 625 /* Trim overlap with preceding mblk */
626 626 tcp_reass_elim_overlap(tcp, mp1);
627 627
628 628 } while (start = end, mp = next_mp);
629 629 mp1 = tcp->tcp_reass_head;
630 630 /* Anything ready to go? */
631 631 if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt)
632 632 return (NULL);
633 633 /* Eat what we can off the queue */
634 634 for (;;) {
635 635 mp = mp1->b_cont;
636 636 end = TCP_REASS_END(mp1);
637 637 TCP_REASS_SET_SEQ(mp1, 0);
638 638 TCP_REASS_SET_END(mp1, 0);
639 639 if (!mp) {
640 640 tcp->tcp_reass_tail = NULL;
641 641 break;
642 642 }
643 643 if (end != TCP_REASS_SEQ(mp)) {
644 644 mp1->b_cont = NULL;
645 645 break;
646 646 }
647 647 mp1 = mp;
648 648 }
649 649 mp1 = tcp->tcp_reass_head;
650 650 tcp->tcp_reass_head = mp;
651 651 return (mp1);
652 652 }
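
For example, with tcp_rnxt at 1000 and the queue holding segments covering
[1000, 1400), [1400, 1800) and [2000, 2400), the head matches tcp_rnxt, so the
loop above hands back the first two mblks as one contiguous chain and leaves
[2000, 2400) queued as the new head until the 1800-2000 gap fills.
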
653 653
654 654 /* Eliminate any overlap that mp may have over later mblks */
655 655 static void
656 656 tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp)
657 657 {
658 658 uint32_t end;
659 659 mblk_t *mp1;
660 660 uint32_t u1;
661 661 tcp_stack_t *tcps = tcp->tcp_tcps;
662 662
663 663 end = TCP_REASS_END(mp);
664 664 while ((mp1 = mp->b_cont) != NULL) {
665 665 u1 = TCP_REASS_SEQ(mp1);
666 666 if (!SEQ_GT(end, u1))
667 667 break;
668 668 if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) {
669 669 mp->b_wptr -= end - u1;
670 670 TCP_REASS_SET_END(mp, u1);
671 671 TCPS_BUMP_MIB(tcps, tcpInDataPartDupSegs);
672 672 TCPS_UPDATE_MIB(tcps, tcpInDataPartDupBytes,
673 673 end - u1);
674 674 break;
675 675 }
676 676 mp->b_cont = mp1->b_cont;
677 677 TCP_REASS_SET_SEQ(mp1, 0);
678 678 TCP_REASS_SET_END(mp1, 0);
679 679 freeb(mp1);
680 680 TCPS_BUMP_MIB(tcps, tcpInDataDupSegs);
681 681 TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes, end - u1);
682 682 }
683 683 if (!mp1)
684 684 tcp->tcp_reass_tail = mp;
685 685 }
686 686
687 687 /*
688 688 * This function performs the PAWS protection check. Returns B_TRUE if the
689 689 * segment passes the PAWS test, else returns B_FALSE.
690 690 */
691 691 boolean_t
692 692 tcp_paws_check(tcp_t *tcp, tcpha_t *tcpha, tcp_opt_t *tcpoptp)
693 693 {
694 694 uint8_t flags;
695 695 int options;
696 696 uint8_t *up;
697 697 conn_t *connp = tcp->tcp_connp;
698 698
699 699 flags = (unsigned int)tcpha->tha_flags & 0xFF;
700 700 /*
701 701 * If the timestamp option is aligned nicely, get the values inline;
702 702 * otherwise call the general parsing routine. The inline path applies
703 703 * only when the timestamp is the only option.
704 704 */
705 705 if (TCP_HDR_LENGTH(tcpha) == (uint32_t)TCP_MIN_HEADER_LENGTH +
706 706 TCPOPT_REAL_TS_LEN &&
707 707 OK_32PTR((up = ((uint8_t *)tcpha) +
708 708 TCP_MIN_HEADER_LENGTH)) &&
709 709 *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
710 710 tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4));
711 711 tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8));
712 712
713 713 options = TCP_OPT_TSTAMP_PRESENT;
714 714 } else {
715 715 if (tcp->tcp_snd_sack_ok) {
716 716 tcpoptp->tcp = tcp;
717 717 } else {
718 718 tcpoptp->tcp = NULL;
719 719 }
720 720 options = tcp_parse_options(tcpha, tcpoptp);
721 721 }
722 722
723 723 if (options & TCP_OPT_TSTAMP_PRESENT) {
724 724 /*
725 725 * Do PAWS per RFC 1323 section 4.2. Accept RST
726 726 * regardless of the timestamp, page 18 RFC 1323.bis.
727 727 */
728 728 if ((flags & TH_RST) == 0 &&
729 729 TSTMP_LT(tcpoptp->tcp_opt_ts_val,
730 730 tcp->tcp_ts_recent)) {
731 731 if (LBOLT_FASTPATH64 <
732 732 (tcp->tcp_last_rcv_lbolt + PAWS_TIMEOUT)) {
733 733 /* This segment is not acceptable. */
734 734 return (B_FALSE);
735 735 } else {
736 736 /*
737 737 * Connection has been idle for
738 738 * too long. Reset the timestamp
739 739 * and assume the segment is valid.
740 740 */
741 741 tcp->tcp_ts_recent =
742 742 tcpoptp->tcp_opt_ts_val;
743 743 }
744 744 }
745 745 } else {
746 746 /*
747 747 * If we don't get a timestamp on every packet, we
748 748 * figure we can't really trust 'em, so we stop sending
749 749 * and parsing them.
750 750 */
751 751 tcp->tcp_snd_ts_ok = B_FALSE;
752 752
753 753 connp->conn_ht_iphc_len -= TCPOPT_REAL_TS_LEN;
754 754 connp->conn_ht_ulp_len -= TCPOPT_REAL_TS_LEN;
755 755 tcp->tcp_tcpha->tha_offset_and_reserved -= (3 << 4);
756 756 /*
757 757 * Adjust the tcp_mss and tcp_cwnd accordingly. We avoid
758 758 * doing a slow start here so as not to lose the
759 759 * transfer rate built up so far.
760 760 */
761 761 tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN);
762 762 if (tcp->tcp_snd_sack_ok)
763 763 tcp->tcp_max_sack_blk = 4;
764 764 }
765 765 return (B_TRUE);
766 766 }
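
PAWS hinges on comparing 32-bit timestamps modulo 2^32. A sketch of the
serial-number comparison TSTMP_LT() is assumed to perform (per its definition
in tcp_impl.h): a precedes b when their signed difference is negative.

        #define TSTMP_LT_SKETCH(a, b)   ((int32_t)((a) - (b)) < 0)

        /*
         * E.g. TSTMP_LT_SKETCH(0xfffffff0, 0x10) is true: 0xfffffff0 sits
         * "just before" 0x10 across the 32-bit wrap.
         */
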
767 767
768 768 /*
769 769 * Defense for the SYN attack -
770 770 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest
771 771 * one from the list of droppable eagers. This list is a subset of q0.
772 772 * See the comments before the definition of MAKE_DROPPABLE().
773 773 * 2. Don't drop a SYN request before its first timeout. This gives every
774 774 * request at least until the first timeout to complete its 3-way handshake.
775 775 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many
776 776 * requests currently on the queue have timed out. This will be used
777 777 * as an indicator of whether an attack is under way, so that appropriate
778 778 * actions can be taken. (It's incremented in tcp_timer() and decremented
779 779 * either when eager goes into ESTABLISHED, or gets freed up.)
780 780 * 4. The current thresholds are: # of timeouts > q0len/4 => SYN alert on;
781 781 * # of timeouts drops back to <= q0len/32 => SYN alert off.
782 782 */
783 783 static boolean_t
784 784 tcp_drop_q0(tcp_t *tcp)
785 785 {
786 786 tcp_t *eager;
787 787 mblk_t *mp;
788 788 tcp_stack_t *tcps = tcp->tcp_tcps;
789 789
790 790 ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock));
791 791 ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0);
792 792
793 793 /* Pick oldest eager from the list of droppable eagers */
794 794 eager = tcp->tcp_eager_prev_drop_q0;
795 795
796 796 /* If the list is empty, return B_FALSE */
797 797 if (eager == tcp) {
798 798 return (B_FALSE);
799 799 }
800 800
801 801 /* If allocated, the mp will be freed in tcp_clean_death_wrapper() */
802 802 if ((mp = allocb(0, BPRI_HI)) == NULL)
803 803 return (B_FALSE);
804 804
805 805 /*
806 806 * Take this eager out from the list of droppable eagers since we are
807 807 * going to drop it.
808 808 */
809 809 MAKE_UNDROPPABLE(eager);
810 810
811 811 if (tcp->tcp_connp->conn_debug) {
812 812 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
813 813 "tcp_drop_q0: listen half-open queue (max=%d) overflow"
814 814 " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0,
815 815 tcp->tcp_conn_req_cnt_q0,
816 816 tcp_display(tcp, NULL, DISP_PORT_ONLY));
817 817 }
818 818
819 819 TCPS_BUMP_MIB(tcps, tcpHalfOpenDrop);
820 820
821 821 /* Put a reference on the conn as we are enqueueing it in the squeue */
822 822 CONN_INC_REF(eager->tcp_connp);
823 823
824 824 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
825 825 tcp_clean_death_wrapper, eager->tcp_connp, NULL,
826 826 SQ_FILL, SQTAG_TCP_DROP_Q0);
827 827
828 828 return (B_TRUE);
829 829 }
830 830
831 831 /*
832 832 * Handle a SYN on an AF_INET6 socket; can be either IPv4 or IPv6
833 833 */
834 834 static mblk_t *
835 835 tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
836 836 ip_recv_attr_t *ira)
837 837 {
838 838 tcp_t *ltcp = lconnp->conn_tcp;
839 839 tcp_t *tcp = connp->conn_tcp;
840 840 mblk_t *tpi_mp;
841 841 ipha_t *ipha;
842 842 ip6_t *ip6h;
843 843 sin6_t sin6;
844 844 uint_t ifindex = ira->ira_ruifindex;
845 845 tcp_stack_t *tcps = tcp->tcp_tcps;
846 846
847 847 if (ira->ira_flags & IRAF_IS_IPV4) {
848 848 ipha = (ipha_t *)mp->b_rptr;
849 849
850 850 connp->conn_ipversion = IPV4_VERSION;
851 851 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
852 852 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
853 853 connp->conn_saddr_v6 = connp->conn_laddr_v6;
854 854
855 855 sin6 = sin6_null;
856 856 sin6.sin6_addr = connp->conn_faddr_v6;
857 857 sin6.sin6_port = connp->conn_fport;
858 858 sin6.sin6_family = AF_INET6;
859 859 sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
860 860 IPCL_ZONEID(lconnp), tcps->tcps_netstack);
861 861
862 862 if (connp->conn_recv_ancillary.crb_recvdstaddr) {
863 863 sin6_t sin6d;
864 864
865 865 sin6d = sin6_null;
866 866 sin6d.sin6_addr = connp->conn_laddr_v6;
867 867 sin6d.sin6_port = connp->conn_lport;
868 868 sin6d.sin6_family = AF_INET6;
869 869 tpi_mp = mi_tpi_extconn_ind(NULL,
870 870 (char *)&sin6d, sizeof (sin6_t),
871 871 (char *)&tcp,
872 872 (t_scalar_t)sizeof (intptr_t),
873 873 (char *)&sin6d, sizeof (sin6_t),
874 874 (t_scalar_t)ltcp->tcp_conn_req_seqnum);
875 875 } else {
876 876 tpi_mp = mi_tpi_conn_ind(NULL,
877 877 (char *)&sin6, sizeof (sin6_t),
878 878 (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
879 879 (t_scalar_t)ltcp->tcp_conn_req_seqnum);
880 880 }
881 881 } else {
882 882 ip6h = (ip6_t *)mp->b_rptr;
883 883
884 884 connp->conn_ipversion = IPV6_VERSION;
885 885 connp->conn_laddr_v6 = ip6h->ip6_dst;
886 886 connp->conn_faddr_v6 = ip6h->ip6_src;
887 887 connp->conn_saddr_v6 = connp->conn_laddr_v6;
888 888
889 889 sin6 = sin6_null;
890 890 sin6.sin6_addr = connp->conn_faddr_v6;
891 891 sin6.sin6_port = connp->conn_fport;
892 892 sin6.sin6_family = AF_INET6;
893 893 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
894 894 sin6.__sin6_src_id = ip_srcid_find_addr(&connp->conn_laddr_v6,
895 895 IPCL_ZONEID(lconnp), tcps->tcps_netstack);
896 896
897 897 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) {
898 898 /* Pass up the scope_id of remote addr */
899 899 sin6.sin6_scope_id = ifindex;
900 900 } else {
901 901 sin6.sin6_scope_id = 0;
902 902 }
903 903 if (connp->conn_recv_ancillary.crb_recvdstaddr) {
904 904 sin6_t sin6d;
905 905
906 906 sin6d = sin6_null;
907 907 sin6d.sin6_addr = connp->conn_laddr_v6;
908 908 sin6d.sin6_port = connp->conn_lport;
909 909 sin6d.sin6_family = AF_INET6;
910 910 if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_laddr_v6))
911 911 sin6d.sin6_scope_id = ifindex;
912 912
913 913 tpi_mp = mi_tpi_extconn_ind(NULL,
914 914 (char *)&sin6d, sizeof (sin6_t),
915 915 (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
916 916 (char *)&sin6d, sizeof (sin6_t),
917 917 (t_scalar_t)ltcp->tcp_conn_req_seqnum);
918 918 } else {
919 919 tpi_mp = mi_tpi_conn_ind(NULL,
920 920 (char *)&sin6, sizeof (sin6_t),
921 921 (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
922 922 (t_scalar_t)ltcp->tcp_conn_req_seqnum);
923 923 }
924 924 }
925 925
926 926 tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
927 927 return (tpi_mp);
928 928 }
929 929
930 930 /* Handle a SYN on an AF_INET socket */
931 931 static mblk_t *
932 932 tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, mblk_t *mp,
933 933 ip_recv_attr_t *ira)
934 934 {
935 935 tcp_t *ltcp = lconnp->conn_tcp;
936 936 tcp_t *tcp = connp->conn_tcp;
937 937 sin_t sin;
938 938 mblk_t *tpi_mp = NULL;
939 939 tcp_stack_t *tcps = tcp->tcp_tcps;
940 940 ipha_t *ipha;
941 941
942 942 ASSERT(ira->ira_flags & IRAF_IS_IPV4);
943 943 ipha = (ipha_t *)mp->b_rptr;
944 944
945 945 connp->conn_ipversion = IPV4_VERSION;
946 946 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_laddr_v6);
947 947 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_faddr_v6);
948 948 connp->conn_saddr_v6 = connp->conn_laddr_v6;
949 949
950 950 sin = sin_null;
951 951 sin.sin_addr.s_addr = connp->conn_faddr_v4;
952 952 sin.sin_port = connp->conn_fport;
953 953 sin.sin_family = AF_INET;
954 954 if (lconnp->conn_recv_ancillary.crb_recvdstaddr) {
955 955 sin_t sind;
956 956
957 957 sind = sin_null;
958 958 sind.sin_addr.s_addr = connp->conn_laddr_v4;
959 959 sind.sin_port = connp->conn_lport;
960 960 sind.sin_family = AF_INET;
961 961 tpi_mp = mi_tpi_extconn_ind(NULL,
962 962 (char *)&sind, sizeof (sin_t), (char *)&tcp,
963 963 (t_scalar_t)sizeof (intptr_t), (char *)&sind,
964 964 sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum);
965 965 } else {
966 966 tpi_mp = mi_tpi_conn_ind(NULL,
967 967 (char *)&sin, sizeof (sin_t),
968 968 (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
969 969 (t_scalar_t)ltcp->tcp_conn_req_seqnum);
970 970 }
971 971
972 972 tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
973 973 return (tpi_mp);
974 974 }
975 975
976 976 /*
977 977 * Called via squeue to get on to eager's perimeter. It sends a
978 978 * TH_RST if eager is in the fanout table. The listener wants the
979 979 * eager to disappear either by means of tcp_eager_blowoff() or
980 980 * tcp_eager_cleanup() being called. tcp_eager_kill() can also be
981 981 * called (via squeue) if the eager cannot be inserted in the
982 982 * fanout table in tcp_input_listener().
983 983 */
984 984 /* ARGSUSED */
985 985 void
986 986 tcp_eager_kill(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
987 987 {
988 988 conn_t *econnp = (conn_t *)arg;
989 989 tcp_t *eager = econnp->conn_tcp;
990 990 tcp_t *listener = eager->tcp_listener;
991 991
992 992 /*
993 993 * We could be called because the listener is closing. Since
994 994 * the eager was using the listener's queues, we avoid
995 995 * using the listener's queues from now on.
996 996 */
997 997 ASSERT(eager->tcp_detached);
998 998 econnp->conn_rq = NULL;
999 999 econnp->conn_wq = NULL;
1000 1000
1001 1001 /*
1002 1002 * An eager's conn_fanout will be NULL if it's a duplicate
1003 1003 * of an existing 4-tuple in the conn fanout table.
1004 1004 * We don't want to send an RST out in such a case.
1005 1005 */
1006 1006 if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) {
1007 1007 tcp_xmit_ctl("tcp_eager_kill, can't wait",
1008 1008 eager, eager->tcp_snxt, 0, TH_RST);
1009 1009 }
1010 1010
1011 1011 /* We are here because listener wants this eager gone */
1012 1012 if (listener != NULL) {
1013 1013 mutex_enter(&listener->tcp_eager_lock);
1014 1014 tcp_eager_unlink(eager);
1015 1015 if (eager->tcp_tconnind_started) {
1016 1016 /*
1017 1017 * The eager has sent a conn_ind up to the
1018 1018 * listener but listener decides to close
1019 1019 * instead. We need to drop the extra ref
1020 1020 * placed on eager in tcp_input_data() before
1021 1021 * sending the conn_ind to listener.
1022 1022 */
1023 1023 CONN_DEC_REF(econnp);
1024 1024 }
1025 1025 mutex_exit(&listener->tcp_eager_lock);
1026 1026 CONN_DEC_REF(listener->tcp_connp);
1027 1027 }
1028 1028
1029 1029 if (eager->tcp_state != TCPS_CLOSED)
1030 1030 tcp_close_detached(eager);
1031 1031 }
1032 1032
1033 1033 /*
1034 1034 * Reset any eager connection hanging off this listener marked
1035 1035 * with 'seqnum' and then reclaim its resources.
1036 1036 */
1037 1037 boolean_t
1038 1038 tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum)
1039 1039 {
1040 1040 tcp_t *eager;
1041 1041 mblk_t *mp;
1042 1042
1043 1043 eager = listener;
1044 1044 mutex_enter(&listener->tcp_eager_lock);
1045 1045 do {
1046 1046 eager = eager->tcp_eager_next_q;
1047 1047 if (eager == NULL) {
1048 1048 mutex_exit(&listener->tcp_eager_lock);
1049 1049 return (B_FALSE);
1050 1050 }
1051 1051 } while (eager->tcp_conn_req_seqnum != seqnum);
1052 1052
1053 1053 if (eager->tcp_closemp_used) {
1054 1054 mutex_exit(&listener->tcp_eager_lock);
1055 1055 return (B_TRUE);
1056 1056 }
1057 1057 eager->tcp_closemp_used = B_TRUE;
1058 1058 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
1059 1059 CONN_INC_REF(eager->tcp_connp);
1060 1060 mutex_exit(&listener->tcp_eager_lock);
1061 1061 mp = &eager->tcp_closemp;
1062 1062 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill,
1063 1063 eager->tcp_connp, NULL, SQ_FILL, SQTAG_TCP_EAGER_BLOWOFF);
1064 1064 return (B_TRUE);
1065 1065 }
1066 1066
1067 1067 /*
1068 1068 * Reset any eager connection hanging off this listener
1069 1069 * and then reclaim its resources.
1070 1070 */
1071 1071 void
1072 1072 tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only)
1073 1073 {
1074 1074 tcp_t *eager;
1075 1075 mblk_t *mp;
1076 1076 tcp_stack_t *tcps = listener->tcp_tcps;
1077 1077
1078 1078 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));
1079 1079
1080 1080 if (!q0_only) {
1081 1081 /* First cleanup q */
1082 1082 TCP_STAT(tcps, tcp_eager_blowoff_q);
1083 1083 eager = listener->tcp_eager_next_q;
1084 1084 while (eager != NULL) {
1085 1085 if (!eager->tcp_closemp_used) {
1086 1086 eager->tcp_closemp_used = B_TRUE;
1087 1087 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
1088 1088 CONN_INC_REF(eager->tcp_connp);
1089 1089 mp = &eager->tcp_closemp;
1090 1090 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
1091 1091 tcp_eager_kill, eager->tcp_connp, NULL,
1092 1092 SQ_FILL, SQTAG_TCP_EAGER_CLEANUP);
1093 1093 }
1094 1094 eager = eager->tcp_eager_next_q;
1095 1095 }
1096 1096 }
1097 1097 /* Then cleanup q0 */
1098 1098 TCP_STAT(tcps, tcp_eager_blowoff_q0);
1099 1099 eager = listener->tcp_eager_next_q0;
1100 1100 while (eager != listener) {
1101 1101 if (!eager->tcp_closemp_used) {
1102 1102 eager->tcp_closemp_used = B_TRUE;
1103 1103 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
1104 1104 CONN_INC_REF(eager->tcp_connp);
1105 1105 mp = &eager->tcp_closemp;
1106 1106 SQUEUE_ENTER_ONE(eager->tcp_connp->conn_sqp, mp,
1107 1107 tcp_eager_kill, eager->tcp_connp, NULL, SQ_FILL,
1108 1108 SQTAG_TCP_EAGER_CLEANUP_Q0);
1109 1109 }
1110 1110 eager = eager->tcp_eager_next_q0;
1111 1111 }
1112 1112 }
1113 1113
1114 1114 /*
1115 1115 * If we are an eager connection hanging off a listener that hasn't
1116 1116 * formally accepted the connection yet, get off its list and blow off
1117 1117 * any data that we have accumulated.
1118 1118 */
1119 1119 void
1120 1120 tcp_eager_unlink(tcp_t *tcp)
1121 1121 {
1122 1122 tcp_t *listener = tcp->tcp_listener;
1123 1123
1124 1124 ASSERT(listener != NULL);
1125 1125 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock));
1126 1126 if (tcp->tcp_eager_next_q0 != NULL) {
1127 1127 ASSERT(tcp->tcp_eager_prev_q0 != NULL);
1128 1128
1129 1129 /* Remove the eager tcp from q0 */
1130 1130 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
1131 1131 tcp->tcp_eager_prev_q0;
1132 1132 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
1133 1133 tcp->tcp_eager_next_q0;
1134 1134 ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
1135 1135 listener->tcp_conn_req_cnt_q0--;
1136 1136
1137 1137 tcp->tcp_eager_next_q0 = NULL;
1138 1138 tcp->tcp_eager_prev_q0 = NULL;
1139 1139
1140 1140 /*
1141 1141 * Take the eager out, if it is in the list of droppable
1142 1142 * eagers.
1143 1143 */
1144 1144 MAKE_UNDROPPABLE(tcp);
1145 1145
1146 1146 if (tcp->tcp_syn_rcvd_timeout != 0) {
1147 1147 /* we have timed out before */
1148 1148 ASSERT(listener->tcp_syn_rcvd_timeout > 0);
1149 1149 listener->tcp_syn_rcvd_timeout--;
1150 1150 }
1151 1151 } else {
1152 1152 tcp_t **tcpp = &listener->tcp_eager_next_q;
1153 1153 tcp_t *prev = NULL;
1154 1154
1155 1155 for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) {
1156 1156 if (tcpp[0] == tcp) {
1157 1157 if (listener->tcp_eager_last_q == tcp) {
1158 1158 /*
1159 1159 * If we are unlinking the last
1160 1160 * element on the list, adjust
1161 1161 * tail pointer. Set tail pointer
1162 1162 * to nil when list is empty.
1163 1163 */
1164 1164 ASSERT(tcp->tcp_eager_next_q == NULL);
1165 1165 if (listener->tcp_eager_last_q ==
1166 1166 listener->tcp_eager_next_q) {
1167 1167 listener->tcp_eager_last_q =
1168 1168 NULL;
1169 1169 } else {
1170 1170 /*
1171 1171 * We won't get here if there
1172 1172 * is only one eager in the
1173 1173 * list.
1174 1174 */
1175 1175 ASSERT(prev != NULL);
1176 1176 listener->tcp_eager_last_q =
1177 1177 prev;
1178 1178 }
1179 1179 }
1180 1180 tcpp[0] = tcp->tcp_eager_next_q;
1181 1181 tcp->tcp_eager_next_q = NULL;
1182 1182 tcp->tcp_eager_last_q = NULL;
1183 1183 ASSERT(listener->tcp_conn_req_cnt_q > 0);
1184 1184 listener->tcp_conn_req_cnt_q--;
1185 1185 break;
1186 1186 }
1187 1187 prev = tcpp[0];
1188 1188 }
1189 1189 }
1190 1190 tcp->tcp_listener = NULL;
1191 1191 }
1192 1192
1193 1193 /* BEGIN CSTYLED */
1194 1194 /*
1195 1195 *
1196 1196 * The sockfs ACCEPT path:
1197 1197 * =======================
1198 1198 *
1199 1199 * The eager is now established in its own perimeter as soon as SYN is
1200 1200 * received in tcp_input_listener(). When sockfs receives conn_ind, it
1201 1201 * completes the accept processing on the acceptor STREAM. The sending
1202 1202 * of conn_ind part is common for both sockfs listener and a TLI/XTI
1203 1203 * listener but a TLI/XTI listener completes the accept processing
1204 1204 * on the listener perimeter.
1205 1205 *
1206 1206 * Common control flow for 3 way handshake:
1207 1207 * ----------------------------------------
1208 1208 *
1209 1209 * incoming SYN (listener perimeter) -> tcp_input_listener()
1210 1210 *
1211 1211 * incoming SYN-ACK-ACK (eager perim) -> tcp_input_data()
1212 1212 * send T_CONN_IND (listener perim) -> tcp_send_conn_ind()
1213 1213 *
1214 1214 * Sockfs ACCEPT Path:
1215 1215 * -------------------
1216 1216 *
1217 1217 * open acceptor stream (tcp_open allocates tcp_tli_accept()
1218 1218 * as STREAM entry point)
1219 1219 *
1220 1220 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_tli_accept()
1221 1221 *
1222 1222 * tcp_tli_accept() extracts the eager and makes the q->q_ptr <-> eager
1223 1223 * association (we are not behind the eager's squeue, but sockfs is protecting
1224 1224 * us and no one knows about this stream yet). The STREAMS entry point
1225 1225 * q->q_info is changed to point at tcp_wput().
1226 1226 *
1227 1227 * tcp_accept_common() sends any deferred eagers via tcp_send_pending() to
1228 1228 * listener (done on listener's perimeter).
1229 1229 *
1230 1230 * tcp_tli_accept() calls tcp_accept_finish() on eagers perimeter to finish
1231 1231 * accept.
1232 1232 *
1233 1233 * TLI/XTI client ACCEPT path:
1234 1234 * ---------------------------
1235 1235 *
1236 1236 * soaccept() sends T_CONN_RES on the listener STREAM.
1237 1237 *
1238 1238 * tcp_tli_accept() -> tcp_accept_swap() complete the processing and send
1239 1239 * a M_SETOPS mblk to eager perimeter to finish accept (tcp_accept_finish()).
1240 1240 *
1241 1241 * Locks:
1242 1242 * ======
1243 1243 *
1244 1244 * listener->tcp_eager_lock protects the listener->tcp_eager_next_q0
1245 1245 * and listener->tcp_eager_next_q.
1246 1246 *
1247 1247 * Referencing:
1248 1248 * ============
1249 1249 *
1250 1250 * 1) We start out in tcp_input_listener with the eager placing a ref on
1251 1251 * the listener and the listener adding the eager to listener->tcp_eager_next_q0.
1252 1252 *
1253 1253 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to listener. Before
1254 1254 * doing so we place a ref on the eager. This ref is finally dropped at the
1255 1255 * end of tcp_accept_finish() while unwinding from the squeue, i.e. the
1256 1256 * reference is dropped by the squeue framework.
1257 1257 *
1258 1258 * 3) The ref on the listener placed in 1 above is dropped in tcp_accept_finish.
1259 1259 *
1260 1260 * The reference must be released by the same entity that added the reference.
1261 1261 * In the above scheme, the eager is the entity that adds and releases the
1262 1262 * references. Note that tcp_accept_finish executes in the squeue of the eager
1263 1263 * (albeit after it is attached to the acceptor stream). Though 1. executes
1264 1264 * in the listener's squeue, the eager is nascent at this point and the
1265 1265 * reference can be considered to have been added on behalf of the eager.
1266 1266 *
1267 1267 * Eager getting a Reset or listener closing:
1268 1268 * ==========================================
1269 1269 *
1270 1270 * Once the listener and eager are linked, the listener never does the unlink.
1271 1271 * If the listener needs to close, tcp_eager_cleanup() is called which queues
1272 1272 * a message on all eager perimeters. The eager then does the unlink, clears
1273 1273 * any pointers to the listener's queue and drops the reference to the
1274 1274 * listener. The listener waits in tcp_close outside the squeue until its
1275 1275 * refcount has dropped to 1. This ensures that the listener has waited for
1276 1276 * all eagers to clear their association with the listener.
1277 1277 *
1278 1278 * Similarly, if eager decides to go away, it can unlink itself and close.
1279 1279 * When the T_CONN_RES comes down, we check if eager has closed. Note that
1280 1280 * the reference to eager is still valid because of the extra ref we put
1281 1281 * in tcp_send_conn_ind.
1282 1282 *
1283 1283 * Listener can always locate the eager under the protection
1284 1284 * of the listener->tcp_eager_lock, and then do a refhold
1285 1285 * on the eager during the accept processing.
1286 1286 *
1287 1287 * The acceptor stream accesses the eager in the accept processing
1288 1288 * based on the ref placed on eager before sending T_conn_ind.
1289 1289 * The only entity that can negate this refhold is a listener close
1290 1290 * which is mutually exclusive with an active acceptor stream.
1291 1291 *
1292 1292 * Eager's reference on the listener
1293 1293 * ===================================
1294 1294 *
1295 1295 * If the accept happens (even on a closed eager) the eager drops its
1296 1296 * reference on the listener at the start of tcp_accept_finish. If the
1297 1297 * eager is killed due to an incoming RST before the T_conn_ind is sent up,
1298 1298 * the reference is dropped in tcp_closei_local. If the listener closes,
1299 1299 * the reference is dropped in tcp_eager_kill. In all cases the reference
1300 1300 * is dropped while executing in the eager's context (squeue).
1301 1301 */
1302 1302 /* END CSTYLED */
1303 1303
1304 1304 /* Process the SYN packet, mp, directed at the listener 'tcp' */
1305 1305
1306 1306 /*
1307 1307 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN.
1308 1308 * tcp_input_data will not see any packets for listeners since the listener
1309 1309 * has conn_recv set to tcp_input_listener.
1310 1310 */
1311 1311 /* ARGSUSED */
1312 1312 static void
1313 1313 tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
1314 1314 {
1315 1315 tcpha_t *tcpha;
1316 1316 uint32_t seg_seq;
1317 1317 tcp_t *eager;
1318 1318 int err;
1319 1319 conn_t *econnp = NULL;
1320 1320 squeue_t *new_sqp;
1321 1321 mblk_t *mp1;
1322 1322 uint_t ip_hdr_len;
1323 1323 conn_t *lconnp = (conn_t *)arg;
1324 1324 tcp_t *listener = lconnp->conn_tcp;
1325 1325 tcp_stack_t *tcps = listener->tcp_tcps;
1326 1326 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
1327 1327 uint_t flags;
1328 1328 mblk_t *tpi_mp;
1329 1329 uint_t ifindex = ira->ira_ruifindex;
1330 1330 boolean_t tlc_set = B_FALSE;
1331 1331
1332 1332 ip_hdr_len = ira->ira_ip_hdr_length;
1333 1333 tcpha = (tcpha_t *)&mp->b_rptr[ip_hdr_len];
1334 1334 flags = (unsigned int)tcpha->tha_flags & 0xFF;
1335 1335
1336 1336 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, lconnp->conn_ixa,
1337 1337 __dtrace_tcp_void_ip_t *, mp->b_rptr, tcp_t *, listener,
1338 1338 __dtrace_tcp_tcph_t *, tcpha);
1339 1339
1340 1340 if (!(flags & TH_SYN)) {
1341 1341 if ((flags & TH_RST) || (flags & TH_URG)) {
1342 1342 freemsg(mp);
1343 1343 return;
1344 1344 }
1345 1345 if (flags & TH_ACK) {
1346 1346 /* Note this executes in listener's squeue */
1347 1347 tcp_xmit_listeners_reset(mp, ira, ipst, lconnp);
1348 1348 return;
1349 1349 }
1350 1350
1351 1351 freemsg(mp);
1352 1352 return;
1353 1353 }
1354 1354
1355 1355 if (listener->tcp_state != TCPS_LISTEN)
1356 1356 goto error2;
1357 1357
1358 1358 ASSERT(IPCL_IS_BOUND(lconnp));
1359 1359
1360 1360 mutex_enter(&listener->tcp_eager_lock);
1361 1361
1362 1362 /*
1363 1363 * If the system is under memory pressure, we need to do our part
1364 1364 * to relieve the pressure. So we only accept a new request if there
1365 1365 * is nothing waiting to be accepted or waiting to complete the 3-way
1366 1366 * handshake. This means that a busy listener will not get too many
1367 1367 * new requests which it cannot handle in time, while a non-busy
1368 1368 * listener is still functioning properly.
1369 1369 */
1370 1370 if (tcps->tcps_reclaim && (listener->tcp_conn_req_cnt_q > 0 ||
1371 1371 listener->tcp_conn_req_cnt_q0 > 0)) {
1372 1372 mutex_exit(&listener->tcp_eager_lock);
1373 1373 TCP_STAT(tcps, tcp_listen_mem_drop);
1374 1374 goto error2;
1375 1375 }
1376 1376
1377 1377 if (listener->tcp_conn_req_cnt_q >= listener->tcp_conn_req_max) {
1378 1378 mutex_exit(&listener->tcp_eager_lock);
1379 1379 TCP_STAT(tcps, tcp_listendrop);
1380 1380 TCPS_BUMP_MIB(tcps, tcpListenDrop);
1381 1381 if (lconnp->conn_debug) {
1382 1382 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
1383 1383 "tcp_input_listener: listen backlog (max=%d) "
1384 1384 "overflow (%d pending) on %s",
1385 1385 listener->tcp_conn_req_max,
1386 1386 listener->tcp_conn_req_cnt_q,
1387 1387 tcp_display(listener, NULL, DISP_PORT_ONLY));
1388 1388 }
1389 1389 goto error2;
1390 1390 }
1391 1391
1392 1392 if (listener->tcp_conn_req_cnt_q0 >=
1393 1393 listener->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) {
1394 1394 /*
1395 1395 * Q0 is full. Drop a pending half-open req from the queue
1396 1396 * to make room for the new SYN req. Also mark the time we
1397 1397 * drop a SYN.
1398 1398 *
1399 1399 * A more aggressive defense against a SYN attack would
1400 1400 * be to set the "tcp_syn_defense" flag now.
1401 1401 */
1402 1402 TCP_STAT(tcps, tcp_listendropq0);
1403 1403 listener->tcp_last_rcv_lbolt = ddi_get_lbolt64();
1404 1404 if (!tcp_drop_q0(listener)) {
1405 1405 mutex_exit(&listener->tcp_eager_lock);
1406 1406 TCPS_BUMP_MIB(tcps, tcpListenDropQ0);
1407 1407 if (lconnp->conn_debug) {
1408 1408 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
1409 1409 "tcp_input_listener: listen half-open "
1410 1410 "queue (max=%d) full (%d pending) on %s",
1411 1411 tcps->tcps_conn_req_max_q0,
1412 1412 listener->tcp_conn_req_cnt_q0,
1413 1413 tcp_display(listener, NULL,
1414 1414 DISP_PORT_ONLY));
1415 1415 }
1416 1416 goto error2;
1417 1417 }
1418 1418 }
1419 1419
1420 1420 /*
1421 1421 * Enforce the limit set on the number of connections per listener.
1422 1422 * Note that tlc_cnt starts at 1, so we need to add 1 to tlc_max
1423 1423 * for the comparison.
1424 1424 */
1425 1425 if (listener->tcp_listen_cnt != NULL) {
1426 1426 tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt;
1427 1427 int64_t now;
1428 1428
1429 1429 if (atomic_add_32_nv(&tlc->tlc_cnt, 1) > tlc->tlc_max + 1) {
1430 1430 mutex_exit(&listener->tcp_eager_lock);
1431 1431 now = ddi_get_lbolt64();
1432 1432 atomic_add_32(&tlc->tlc_cnt, -1);
1433 1433 TCP_STAT(tcps, tcp_listen_cnt_drop);
1434 1434 tlc->tlc_drop++;
1435 1435 if (now - tlc->tlc_report_time >
1436 1436 MSEC_TO_TICK(TCP_TLC_REPORT_INTERVAL)) {
1437 1437 zcmn_err(lconnp->conn_zoneid, CE_WARN,
1438 1438 "Listener (port %d) connection max (%u) "
1439 1439 "reached: %u attempts dropped total\n",
1440 1440 ntohs(listener->tcp_connp->conn_lport),
1441 1441 tlc->tlc_max, tlc->tlc_drop);
1442 1442 tlc->tlc_report_time = now;
1443 1443 }
1444 1444 goto error2;
1445 1445 }
1446 1446 tlc_set = B_TRUE;
1447 1447 }
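
This bump-then-back-out idiom is the pattern the webrev's subject targets:
atomic_add_32_nv(&tlc->tlc_cnt, 1) and atomic_add_32(&tlc->tlc_cnt, -1) can be
spelled with the dedicated increment/decrement entry points from atomic.h. A
sketch of the equivalent (not necessarily a site this change converts):

        /* Bump first, then back out if the (tlc_max + 1) limit is exceeded. */
        if (atomic_inc_32_nv(&tlc->tlc_cnt) > tlc->tlc_max + 1) {
                atomic_dec_32(&tlc->tlc_cnt);
                /* count the drop and refuse the SYN */
        }
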
1448 1448
1449 1449 mutex_exit(&listener->tcp_eager_lock);
1450 1450
1451 1451 /*
1452 1452 * IP sets ira_sqp to either the sender's conn_sqp (for loopback)
1453 1453 * or based on the ring (for packets from GLD). Otherwise it is
1454 1454 * set based on lbolt i.e., a somewhat random number.
1455 1455 */
1456 1456 ASSERT(ira->ira_sqp != NULL);
1457 1457 new_sqp = ira->ira_sqp;
1458 1458
1459 1459 econnp = (conn_t *)tcp_get_conn(arg2, tcps);
1460 1460 if (econnp == NULL)
1461 1461 goto error2;
1462 1462
1463 1463 ASSERT(econnp->conn_netstack == lconnp->conn_netstack);
1464 1464 econnp->conn_sqp = new_sqp;
1465 1465 econnp->conn_initial_sqp = new_sqp;
1466 1466 econnp->conn_ixa->ixa_sqp = new_sqp;
1467 1467
1468 1468 econnp->conn_fport = tcpha->tha_lport;
1469 1469 econnp->conn_lport = tcpha->tha_fport;
1470 1470
1471 1471 err = conn_inherit_parent(lconnp, econnp);
1472 1472 if (err != 0)
1473 1473 goto error3;
1474 1474
1475 1475 /* We already know the laddr of the new connection is ours */
1476 1476 econnp->conn_ixa->ixa_src_generation = ipst->ips_src_generation;
1477 1477
1478 1478 ASSERT(OK_32PTR(mp->b_rptr));
1479 1479 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION ||
1480 1480 IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION);
1481 1481
1482 1482 if (lconnp->conn_family == AF_INET) {
1483 1483 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
1484 1484 tpi_mp = tcp_conn_create_v4(lconnp, econnp, mp, ira);
1485 1485 } else {
1486 1486 tpi_mp = tcp_conn_create_v6(lconnp, econnp, mp, ira);
1487 1487 }
1488 1488
1489 1489 if (tpi_mp == NULL)
1490 1490 goto error3;
1491 1491
1492 1492 eager = econnp->conn_tcp;
1493 1493 eager->tcp_detached = B_TRUE;
1494 1494 SOCK_CONNID_INIT(eager->tcp_connid);
1495 1495
1496 1496 /*
1497 1497 * Initialize the eager's tcp_t and inherit some parameters from
1498 1498 * the listener.
1499 1499 */
1500 1500 tcp_init_values(eager, listener);
1501 1501
1502 1502 ASSERT((econnp->conn_ixa->ixa_flags &
1503 1503 (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
1504 1504 IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO)) ==
1505 1505 (IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
1506 1506 IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO));
1507 1507
1508 1508 if (!tcps->tcps_dev_flow_ctl)
1509 1509 econnp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;
1510 1510
1511 1511 /* Prepare for diffing against previous packets */
1512 1512 eager->tcp_recvifindex = 0;
1513 1513 eager->tcp_recvhops = 0xffffffffU;
1514 1514
1515 1515 if (!(ira->ira_flags & IRAF_IS_IPV4) && econnp->conn_bound_if == 0) {
1516 1516 if (IN6_IS_ADDR_LINKSCOPE(&econnp->conn_faddr_v6) ||
1517 1517 IN6_IS_ADDR_LINKSCOPE(&econnp->conn_laddr_v6)) {
1518 1518 econnp->conn_incoming_ifindex = ifindex;
1519 1519 econnp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
1520 1520 econnp->conn_ixa->ixa_scopeid = ifindex;
1521 1521 }
1522 1522 }
1523 1523
1524 1524 if ((ira->ira_flags & (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS)) ==
1525 1525 (IRAF_IS_IPV4|IRAF_IPV4_OPTIONS) &&
1526 1526 tcps->tcps_rev_src_routes) {
1527 1527 ipha_t *ipha = (ipha_t *)mp->b_rptr;
1528 1528 ip_pkt_t *ipp = &econnp->conn_xmit_ipp;
1529 1529
1530 1530 /* Source routing option copyover (reverse it) */
1531 1531 err = ip_find_hdr_v4(ipha, ipp, B_TRUE);
1532 1532 if (err != 0) {
1533 1533 freemsg(tpi_mp);
1534 1534 goto error3;
1535 1535 }
1536 1536 ip_pkt_source_route_reverse_v4(ipp);
1537 1537 }
1538 1538
1539 1539 ASSERT(eager->tcp_conn.tcp_eager_conn_ind == NULL);
1540 1540 ASSERT(!eager->tcp_tconnind_started);
1541 1541 /*
1542 1542 * If the SYN came with a credential, it's a loopback packet or a
1543 1543 * labeled packet; attach the credential to the TPI message.
1544 1544 */
1545 1545 if (ira->ira_cred != NULL)
1546 1546 mblk_setcred(tpi_mp, ira->ira_cred, ira->ira_cpid);
1547 1547
1548 1548 eager->tcp_conn.tcp_eager_conn_ind = tpi_mp;
1549 1549 ASSERT(eager->tcp_ordrel_mp == NULL);
1550 1550
1551 1551 /* Inherit the listener's non-STREAMS flag */
1552 1552 if (IPCL_IS_NONSTR(lconnp)) {
1553 1553 econnp->conn_flags |= IPCL_NONSTR;
1554 1554 /* All non-STREAMS tcp_ts are sockets */
1555 1555 eager->tcp_issocket = B_TRUE;
1556 1556 } else {
1557 1557 /*
1558 1558 * Pre-allocate the T_ordrel_ind mblk for TPI socket so that
1559 1559 * at close time, we will always have that to send up.
1560 1560 * Otherwise, we need to do special handling in case the
1561 1561 * allocation fails at that time.
1562 1562 */
1563 1563 if ((eager->tcp_ordrel_mp = mi_tpi_ordrel_ind()) == NULL)
1564 1564 goto error3;
1565 1565 }
1566 1566 /*
1567 1567 * Now that the IP addresses and ports are setup in econnp we
1568 1568 * can do the IPsec policy work.
1569 1569 */
1570 1570 if (ira->ira_flags & IRAF_IPSEC_SECURE) {
1571 1571 if (lconnp->conn_policy != NULL) {
1572 1572 /*
1573 1573 * Inherit the policy from the listener; use
1574 1574 * actions from ira
1575 1575 */
1576 1576 if (!ip_ipsec_policy_inherit(econnp, lconnp, ira)) {
1577 1577 CONN_DEC_REF(econnp);
1578 1578 freemsg(mp);
1579 1579 goto error3;
1580 1580 }
1581 1581 }
1582 1582 }
1583 1583
1584 1584 /*
1585 1585 * tcp_set_destination() may set tcp_rwnd according to the route
1586 1586 * metrics. If it does not, the eager's receive window will be set
1587 1587 * to the listener's receive window later in this function.
1588 1588 */
1589 1589 eager->tcp_rwnd = 0;
1590 1590
1591 1591 if (is_system_labeled()) {
1592 1592 ip_xmit_attr_t *ixa = econnp->conn_ixa;
1593 1593
1594 1594 ASSERT(ira->ira_tsl != NULL);
1595 1595 /* Discard any old label */
1596 1596 if (ixa->ixa_free_flags & IXA_FREE_TSL) {
1597 1597 ASSERT(ixa->ixa_tsl != NULL);
1598 1598 label_rele(ixa->ixa_tsl);
1599 1599 ixa->ixa_free_flags &= ~IXA_FREE_TSL;
1600 1600 ixa->ixa_tsl = NULL;
1601 1601 }
1602 1602 if ((lconnp->conn_mlp_type != mlptSingle ||
1603 1603 lconnp->conn_mac_mode != CONN_MAC_DEFAULT) &&
1604 1604 ira->ira_tsl != NULL) {
1605 1605 /*
1606 1606 * If this is an MLP connection or a MAC-Exempt
1607 1607 * connection with an unlabeled node, packets are to be
1608 1608 * exchanged using the security label of the received
1609 1609 * SYN packet instead of the server application's label.
1610 1610 * tsol_check_dest called from ip_set_destination
1611 1611 * might later update TSF_UNLABELED by replacing
1612 1612 * ixa_tsl with a new label.
1613 1613 */
1614 1614 label_hold(ira->ira_tsl);
1615 1615 ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl);
1616 1616 DTRACE_PROBE2(mlp_syn_accept, conn_t *,
1617 1617 econnp, ts_label_t *, ixa->ixa_tsl)
1618 1618 } else {
1619 1619 ixa->ixa_tsl = crgetlabel(econnp->conn_cred);
1620 1620 DTRACE_PROBE2(syn_accept, conn_t *,
1621 1621 econnp, ts_label_t *, ixa->ixa_tsl)
1622 1622 }
1623 1623 /*
1624 1624 * conn_connect() called from tcp_set_destination will verify
1625 1625 * the destination is allowed to receive packets at the
1626 1626 * security label of the SYN-ACK we are generating. As part of
1627 1627 * that, tsol_check_dest() may create a new effective label for
1628 1628 * this connection.
1629 1629 * Finally conn_connect() will call conn_update_label.
1630 1630 * All that remains for TCP to do is to call
1631 1631 * conn_build_hdr_template which is done as part of
1632 1632 * tcp_set_destination.
1633 1633 */
1634 1634 }
1635 1635
1636 1636 /*
1637 1637 * Since we will clear tcp_listener before we clear tcp_detached
1638 1638 * in the accept code we need tcp_hard_binding aka tcp_accept_inprogress
1639 1639 * so we can tell a TCP_IS_DETACHED_NONEAGER apart.
1640 1640 */
1641 1641 eager->tcp_hard_binding = B_TRUE;
1642 1642
1643 1643 tcp_bind_hash_insert(&tcps->tcps_bind_fanout[
1644 1644 TCP_BIND_HASH(econnp->conn_lport)], eager, 0);
1645 1645
1646 1646 CL_INET_CONNECT(econnp, B_FALSE, err);
1647 1647 if (err != 0) {
1648 1648 tcp_bind_hash_remove(eager);
1649 1649 goto error3;
1650 1650 }
1651 1651
1652 1652 SOCK_CONNID_BUMP(eager->tcp_connid);
1653 1653
1654 1654 /*
1655 1655 * Adapt our mss, ttl, ... based on the remote address.
1656 1656 */
1657 1657
1658 1658 if (tcp_set_destination(eager) != 0) {
1659 1659 TCPS_BUMP_MIB(tcps, tcpAttemptFails);
1660 1660 /* Undo the bind_hash_insert */
1661 1661 tcp_bind_hash_remove(eager);
1662 1662 goto error3;
1663 1663 }
1664 1664
1665 1665 /* Process all TCP options. */
1666 1666 tcp_process_options(eager, tcpha);
1667 1667
1668 1668 /* Is the other end ECN capable? */
1669 1669 if (tcps->tcps_ecn_permitted >= 1 &&
1670 1670 (tcpha->tha_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) {
1671 1671 eager->tcp_ecn_ok = B_TRUE;
1672 1672 }
1673 1673
1674 1674 /*
1675 1675 * The listener's conn_rcvbuf should be the default window size or a
1676 1676 * window size changed via SO_RCVBUF option. First round up the
1677 1677	 * eager's tcp_rwnd to the nearest MSS multiple. Then find out the
1678 1678	 * window scale option value if needed. Call tcp_rwnd_set() to finish
1679 1679	 * the setting.
1680 1680	 *
1681 1681	 * Note that if there is an rpipe metric associated with the remote
1682 1682	 * host, we should not inherit the receive window size from the
1682 1682	 * listener.
1683 1683 */
1684 1684 eager->tcp_rwnd = MSS_ROUNDUP(
1685 1685 (eager->tcp_rwnd == 0 ? econnp->conn_rcvbuf :
1686 1686 eager->tcp_rwnd), eager->tcp_mss);
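	/*
	 * Worked example (illustrative, assuming MSS_ROUNDUP() rounds its
	 * first argument up to the next multiple of the second): with
	 * conn_rcvbuf = 131072 and tcp_mss = 1460, tcp_rwnd becomes
	 * 131400, i.e. exactly 90 full-sized segments.
	 */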
1687 1687 if (eager->tcp_snd_ws_ok)
1688 1688 tcp_set_ws_value(eager);
1689 1689 /*
1690 1690 * Note that this is the only place tcp_rwnd_set() is called for
1691 1691 * accepting a connection. We need to call it here instead of
1692 1692 * after the 3-way handshake because we need to tell the other
1693 1693 * side our rwnd in the SYN-ACK segment.
1694 1694 */
1695 1695 (void) tcp_rwnd_set(eager, eager->tcp_rwnd);
1696 1696
1697 1697 ASSERT(eager->tcp_connp->conn_rcvbuf != 0 &&
1698 1698 eager->tcp_connp->conn_rcvbuf == eager->tcp_rwnd);
1699 1699
1700 1700 ASSERT(econnp->conn_rcvbuf != 0 &&
1701 1701 econnp->conn_rcvbuf == eager->tcp_rwnd);
1702 1702
1703 1703 /* Put a ref on the listener for the eager. */
1704 1704 CONN_INC_REF(lconnp);
1705 1705 mutex_enter(&listener->tcp_eager_lock);
1706 1706 listener->tcp_eager_next_q0->tcp_eager_prev_q0 = eager;
1707 1707 eager->tcp_eager_next_q0 = listener->tcp_eager_next_q0;
1708 1708 listener->tcp_eager_next_q0 = eager;
1709 1709 eager->tcp_eager_prev_q0 = listener;
1710 1710
1711 1711 /* Set tcp_listener before adding it to tcp_conn_fanout */
1712 1712 eager->tcp_listener = listener;
1713 1713 eager->tcp_saved_listener = listener;
1714 1714
1715 1715 /*
1716 1716 * Set tcp_listen_cnt so that when the connection is done, the counter
1717 1717 * is decremented.
1718 1718 */
1719 1719 eager->tcp_listen_cnt = listener->tcp_listen_cnt;
1720 1720
1721 1721 /*
1722 1722 * Tag this detached tcp vector for later retrieval
1723 1723 * by our listener client in tcp_accept().
1724 1724 */
1725 1725 eager->tcp_conn_req_seqnum = listener->tcp_conn_req_seqnum;
1726 1726 listener->tcp_conn_req_cnt_q0++;
1727 1727 if (++listener->tcp_conn_req_seqnum == -1) {
1728 1728 /*
1729 1729 * -1 is "special" and defined in TPI as something
1730 1730 * that should never be used in T_CONN_IND
1731 1731 */
1732 1732 ++listener->tcp_conn_req_seqnum;
1733 1733 }
1734 1734 mutex_exit(&listener->tcp_eager_lock);
1735 1735
1736 1736 if (listener->tcp_syn_defense) {
1737 1737 /* Don't drop the SYN that comes from a good IP source */
1738 1738 ipaddr_t *addr_cache;
1739 1739
1740 1740 addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache);
1741 1741 if (addr_cache != NULL && econnp->conn_faddr_v4 ==
1742 1742 addr_cache[IP_ADDR_CACHE_HASH(econnp->conn_faddr_v4)]) {
1743 1743 eager->tcp_dontdrop = B_TRUE;
1744 1744 }
1745 1745 }
1746 1746
1747 1747 /*
1748 1748 * We need to insert the eager in its own perimeter but as soon
1749 1749 * as we do that, we expose the eager to the classifier and
1750 1750 * should not touch any field outside the eager's perimeter.
1751 1751 * So do all the work necessary before inserting the eager
1752 1752 * in its own perimeter. Be optimistic that conn_connect()
1753 1753 * will succeed but undo everything if it fails.
1754 1754 */
1755 1755 seg_seq = ntohl(tcpha->tha_seq);
1756 1756 eager->tcp_irs = seg_seq;
1757 1757 eager->tcp_rack = seg_seq;
1758 1758 eager->tcp_rnxt = seg_seq + 1;
1759 1759 eager->tcp_tcpha->tha_ack = htonl(eager->tcp_rnxt);
1760 1760 TCPS_BUMP_MIB(tcps, tcpPassiveOpens);
1761 1761 eager->tcp_state = TCPS_SYN_RCVD;
1762 1762 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
1763 1763 econnp->conn_ixa, void, NULL, tcp_t *, eager, void, NULL,
1764 1764 int32_t, TCPS_LISTEN);
1765 1765
1766 1766 mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss,
1767 1767 NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE);
1768 1768 if (mp1 == NULL) {
1769 1769 /*
1770 1770		 * Increment the ref count as we are going to
1771 1771		 * enqueue an mp in the squeue.
1772 1772 */
1773 1773 CONN_INC_REF(econnp);
1774 1774 goto error;
1775 1775 }
1776 1776
1777 1777 /*
1778 1778	 * We need to start the rto timer. In the normal case, we start
1779 1779	 * the timer after sending the packet on the wire (or at
1780 1780	 * least believing that the packet was sent by waiting for
1781 1781	 * conn_ip_output() to return). Since this is the first packet
1782 1782	 * being sent on the wire for the eager, our initial tcp_rto
1783 1783	 * is at least tcp_rexmit_interval_min, which is a fairly
1784 1784	 * large value to allow the algorithm to adjust slowly to large
1785 1785	 * fluctuations of RTT during the first few transmissions.
1786 1786	 *
1787 1787	 * Starting the timer first and then sending the packet in this
1788 1788	 * case shouldn't make much difference since tcp_rexmit_interval_min
1789 1789	 * is on the order of several hundred milliseconds; starting the
1790 1790	 * timer first and then sending the packet only shifts things by
1791 1791	 * a few microseconds.
1792 1792 *
1793 1793 * Without this optimization, we are forced to hold the fanout
1794 1794 * lock across the ipcl_bind_insert() and sending the packet
1795 1795 * so that we don't race against an incoming packet (maybe RST)
1796 1796 * for this eager.
1797 1797 *
1798 1798 * It is necessary to acquire an extra reference on the eager
1799 1799 * at this point and hold it until after tcp_send_data() to
1800 1800 * ensure against an eager close race.
1801 1801 */
1802 1802
1803 1803 CONN_INC_REF(econnp);
1804 1804
1805 1805 TCP_TIMER_RESTART(eager, eager->tcp_rto);
1806 1806
1807 1807 /*
1808 1808 * Insert the eager in its own perimeter now. We are ready to deal
1809 1809 * with any packets on eager.
1810 1810 */
1811 1811 if (ipcl_conn_insert(econnp) != 0)
1812 1812 goto error;
1813 1813
1814 1814 ASSERT(econnp->conn_ixa->ixa_notify_cookie == econnp->conn_tcp);
1815 1815 freemsg(mp);
1816 1816 /*
1817 1817 * Send the SYN-ACK. Use the right squeue so that conn_ixa is
1818 1818 * only used by one thread at a time.
1819 1819 */
1820 1820 if (econnp->conn_sqp == lconnp->conn_sqp) {
1821 1821 DTRACE_TCP5(send, mblk_t *, NULL, ip_xmit_attr_t *,
1822 1822 econnp->conn_ixa, __dtrace_tcp_void_ip_t *, mp1->b_rptr,
1823 1823 tcp_t *, eager, __dtrace_tcp_tcph_t *,
1824 1824 &mp1->b_rptr[econnp->conn_ixa->ixa_ip_hdr_length]);
1825 1825 (void) conn_ip_output(mp1, econnp->conn_ixa);
1826 1826 CONN_DEC_REF(econnp);
1827 1827 } else {
1828 1828 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_send_synack,
1829 1829 econnp, NULL, SQ_PROCESS, SQTAG_TCP_SEND_SYNACK);
1830 1830 }
1831 1831 return;
1832 1832 error:
1833 1833 freemsg(mp1);
1834 1834 eager->tcp_closemp_used = B_TRUE;
1835 1835 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
1836 1836 mp1 = &eager->tcp_closemp;
1837 1837 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp1, tcp_eager_kill,
1838 1838 econnp, NULL, SQ_FILL, SQTAG_TCP_CONN_REQ_2);
1839 1839
1840 1840 /*
1841 1841	 * If a connection already exists, send the mp to that connection so
1842 1842 * that it can be appropriately dealt with.
1843 1843 */
1844 1844 ipst = tcps->tcps_netstack->netstack_ip;
1845 1845
1846 1846 if ((econnp = ipcl_classify(mp, ira, ipst)) != NULL) {
1847 1847 if (!IPCL_IS_CONNECTED(econnp)) {
1848 1848 /*
1849 1849 * Something bad happened. ipcl_conn_insert()
1850 1850			 * failed because a connection already existed
1851 1851			 * in the connected hash but we can't find it
1852 1852			 * anymore (someone blew it away). Just
1853 1853			 * free this message and hopefully the remote
1854 1854			 * will retransmit, at which time the SYN can be
1855 1855			 * treated as a new connection or dealt with
1856 1856			 * via a TH_RST if a connection already exists.
1857 1857 */
1858 1858 CONN_DEC_REF(econnp);
1859 1859 freemsg(mp);
1860 1860 } else {
1861 1861 SQUEUE_ENTER_ONE(econnp->conn_sqp, mp, tcp_input_data,
1862 1862 econnp, ira, SQ_FILL, SQTAG_TCP_CONN_REQ_1);
1863 1863 }
1864 1864 } else {
1865 1865 /* Nobody wants this packet */
1866 1866 freemsg(mp);
1867 1867 }
1868 1868 return;
1869 1869 error3:
1870 1870 CONN_DEC_REF(econnp);
1871 1871 error2:
1872 1872 freemsg(mp);
1873 1873 if (tlc_set)
1874 1874 atomic_add_32(&listener->tcp_listen_cnt->tlc_cnt, -1);
1875 1875 }
1876 1876
1877 1877 /*
1878 1878  * In the ideal case of vertical partitioning on a NUMA architecture,
1879 1879  * it's beneficial to have the listener and all the incoming connections
1880 1880  * tied to the same squeue. The other constraint is that incoming
1881 1881  * connections should be tied to the squeue attached to the interrupted
1882 1882  * CPU, for obvious locality reasons, so this leaves the listener to
1883 1883  * be tied to the same squeue. Our only problem is that when the
1884 1884  * listener is binding, the CPU that will get interrupted by the NIC
1885 1885  * whose IP address the listener is binding to is not yet known. So
1886 1886  * the code below allows us to change that binding at the time the
1887 1887  * CPU is interrupted, by virtue of the incoming connection's squeue.
1888 1888  *
1889 1889  * This is useful only in the case of a listener bound to a specific IP
1890 1890  * address. Other kinds of listeners are bound the very first time,
1891 1891  * and there is no attempt to rebind them.
1892 1892  */
1893 1893 void
1894 1894 tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2,
1895 1895 ip_recv_attr_t *ira)
1896 1896 {
1897 1897 conn_t *connp = (conn_t *)arg;
1898 1898 squeue_t *sqp = (squeue_t *)arg2;
1899 1899 squeue_t *new_sqp;
1900 1900 uint32_t conn_flags;
1901 1901
1902 1902 /*
1903 1903 * IP sets ira_sqp to either the senders conn_sqp (for loopback)
1904 1904 * or based on the ring (for packets from GLD). Otherwise it is
1905 1905 * set based on lbolt i.e., a somewhat random number.
1906 1906 */
1907 1907 ASSERT(ira->ira_sqp != NULL);
1908 1908 new_sqp = ira->ira_sqp;
1909 1909
1910 1910 if (connp->conn_fanout == NULL)
1911 1911 goto done;
1912 1912
1913 1913 if (!(connp->conn_flags & IPCL_FULLY_BOUND)) {
1914 1914 mutex_enter(&connp->conn_fanout->connf_lock);
1915 1915 mutex_enter(&connp->conn_lock);
1916 1916 /*
1917 1917 * No one from read or write side can access us now
1918 1918 * except for already queued packets on this squeue.
1919 1919 * But since we haven't changed the squeue yet, they
1920 1920 * can't execute. If they are processed after we have
1921 1921 * changed the squeue, they are sent back to the
1922 1922 * correct squeue down below.
1923 1923		 * But a listener close can race with processing of
1924 1924 * incoming SYN. If incoming SYN processing changes
1925 1925 * the squeue then the listener close which is waiting
1926 1926 * to enter the squeue would operate on the wrong
1927 1927 * squeue. Hence we don't change the squeue here unless
1928 1928 * the refcount is exactly the minimum refcount. The
1929 1929		 * minimum refcount of 4 is counted as: 1 each for
1930 1930 * TCP and IP, 1 for being in the classifier hash, and
1931 1931 * 1 for the mblk being processed.
1932 1932 */
1933 1933
1934 1934 if (connp->conn_ref != 4 ||
1935 1935 connp->conn_tcp->tcp_state != TCPS_LISTEN) {
1936 1936 mutex_exit(&connp->conn_lock);
1937 1937 mutex_exit(&connp->conn_fanout->connf_lock);
1938 1938 goto done;
1939 1939 }
1940 1940 if (connp->conn_sqp != new_sqp) {
1941 1941 while (connp->conn_sqp != new_sqp)
1942 - (void) casptr(&connp->conn_sqp, sqp, new_sqp);
1942 + (void) atomic_cas_ptr(&connp->conn_sqp, sqp,
1943 + new_sqp);
1943 1944 /* No special MT issues for outbound ixa_sqp hint */
1944 1945 connp->conn_ixa->ixa_sqp = new_sqp;
1945 1946 }
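		/*
		 * A minimal, self-contained sketch of the atomic_cas_ptr(3C)
		 * idiom the hunk above switches to (illustrative names, not
		 * from this file). atomic_cas_ptr() stores newval only if
		 * *target still equals the comparand, and returns the value
		 * it found, so the caller can tell whether the swap took:
		 *
		 *	#include <atomic.h>
		 *
		 *	static void *shared_ptr;
		 *
		 *	void
		 *	set_ptr(void *newval)
		 *	{
		 *		void *old;
		 *
		 *		do {
		 *			old = shared_ptr;
		 *		} while (atomic_cas_ptr(&shared_ptr, old,
		 *		    newval) != old);
		 *	}
		 *
		 * That loop exits via the return value; the code above
		 * instead re-tests connp->conn_sqp against new_sqp, which is
		 * equivalent once the store becomes visible.
		 */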
1946 1947
1947 1948 do {
1948 1949 conn_flags = connp->conn_flags;
1949 1950 conn_flags |= IPCL_FULLY_BOUND;
1950 - (void) cas32(&connp->conn_flags, connp->conn_flags,
1951 - conn_flags);
1951 + (void) atomic_cas_32(&connp->conn_flags,
1952 + connp->conn_flags, conn_flags);
1952 1953 } while (!(connp->conn_flags & IPCL_FULLY_BOUND));
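		/*
		 * A matching sketch for atomic_cas_32(3C), with illustrative
		 * names; the returned old value gives a direct success test
		 * for a read-modify-write that sets a flag bit:
		 *
		 *	#include <atomic.h>
		 *
		 *	static volatile uint32_t flags;
		 *
		 *	void
		 *	set_flag(uint32_t bit)
		 *	{
		 *		uint32_t old;
		 *
		 *		do {
		 *			old = flags;
		 *		} while (atomic_cas_32(&flags, old,
		 *		    old | bit) != old);
		 *	}
		 *
		 * Where a plain bitwise OR is all that is needed,
		 * atomic_or_32(&flags, bit) would be a simpler alternative,
		 * assuming it is acceptable in this context.
		 */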
1953 1954
1954 1955 mutex_exit(&connp->conn_fanout->connf_lock);
1955 1956 mutex_exit(&connp->conn_lock);
1956 1957
1957 1958 /*
1958 1959 * Assume we have picked a good squeue for the listener. Make
1959 1960 * subsequent SYNs not try to change the squeue.
1960 1961 */
1961 1962 connp->conn_recv = tcp_input_listener;
1962 1963 }
1963 1964
1964 1965 done:
1965 1966 if (connp->conn_sqp != sqp) {
1966 1967 CONN_INC_REF(connp);
1967 1968 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, connp->conn_recv, connp,
1968 1969 ira, SQ_FILL, SQTAG_TCP_CONN_REQ_UNBOUND);
1969 1970 } else {
1970 1971 tcp_input_listener(connp, mp, sqp, ira);
1971 1972 }
1972 1973 }
1973 1974
1974 1975 /*
1975 1976 * Send up all messages queued on tcp_rcv_list.
1976 1977 */
1977 1978 uint_t
1978 1979 tcp_rcv_drain(tcp_t *tcp)
1979 1980 {
1980 1981 mblk_t *mp;
1981 1982 uint_t ret = 0;
1982 1983 #ifdef DEBUG
1983 1984 uint_t cnt = 0;
1984 1985 #endif
1985 1986 queue_t *q = tcp->tcp_connp->conn_rq;
1986 1987
1987 1988 /* Can't drain on an eager connection */
1988 1989 if (tcp->tcp_listener != NULL)
1989 1990 return (ret);
1990 1991
1991 1992 /* Can't be a non-STREAMS connection */
1992 1993 ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
1993 1994
1994 1995 /* No need for the push timer now. */
1995 1996 if (tcp->tcp_push_tid != 0) {
1996 1997 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
1997 1998 tcp->tcp_push_tid = 0;
1998 1999 }
1999 2000
2000 2001 /*
2001 2002 * Handle two cases here: we are currently fused or we were
2002 2003 * previously fused and have some urgent data to be delivered
2003 2004 * upstream. The latter happens because we either ran out of
2004 2005 * memory or were detached and therefore sending the SIGURG was
2005 2006 * deferred until this point. In either case we pass control
2006 2007 * over to tcp_fuse_rcv_drain() since it may need to complete
2007 2008 * some work.
2008 2009 */
2009 2010 if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) {
2010 2011 if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL :
2011 2012 &tcp->tcp_fused_sigurg_mp))
2012 2013 return (ret);
2013 2014 }
2014 2015
2015 2016 while ((mp = tcp->tcp_rcv_list) != NULL) {
2016 2017 tcp->tcp_rcv_list = mp->b_next;
2017 2018 mp->b_next = NULL;
2018 2019 #ifdef DEBUG
2019 2020 cnt += msgdsize(mp);
2020 2021 #endif
2021 2022 putnext(q, mp);
2022 2023 }
2023 2024 #ifdef DEBUG
2024 2025 ASSERT(cnt == tcp->tcp_rcv_cnt);
2025 2026 #endif
2026 2027 tcp->tcp_rcv_last_head = NULL;
2027 2028 tcp->tcp_rcv_last_tail = NULL;
2028 2029 tcp->tcp_rcv_cnt = 0;
2029 2030
2030 2031 if (canputnext(q))
2031 2032 return (tcp_rwnd_reopen(tcp));
2032 2033
2033 2034 return (ret);
2034 2035 }
2035 2036
2036 2037 /*
2037 2038 * Queue data on tcp_rcv_list which is a b_next chain.
2038 2039  * tcp_rcv_last_head/tail point into the last element of this chain.
2039 2040 * Each element of the chain is a b_cont chain.
2040 2041 *
2041 2042 * M_DATA messages are added to the current element.
2042 2043 * Other messages are added as new (b_next) elements.
2043 2044 */
2044 2045 void
2045 2046 tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len, cred_t *cr)
2046 2047 {
2047 2048 ASSERT(seg_len == msgdsize(mp));
2048 2049 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL);
2049 2050
2050 2051 if (is_system_labeled()) {
2051 2052 ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL);
2052 2053 /*
2053 2054 * Provide for protocols above TCP such as RPC. NOPID leaves
2054 2055 * db_cpid unchanged.
2055 2056 * The cred could have already been set.
2056 2057 */
2057 2058 if (cr != NULL)
2058 2059 mblk_setcred(mp, cr, NOPID);
2059 2060 }
2060 2061
2061 2062 if (tcp->tcp_rcv_list == NULL) {
2062 2063 ASSERT(tcp->tcp_rcv_last_head == NULL);
2063 2064 tcp->tcp_rcv_list = mp;
2064 2065 tcp->tcp_rcv_last_head = mp;
2065 2066 } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) {
2066 2067 tcp->tcp_rcv_last_tail->b_cont = mp;
2067 2068 } else {
2068 2069 tcp->tcp_rcv_last_head->b_next = mp;
2069 2070 tcp->tcp_rcv_last_head = mp;
2070 2071 }
2071 2072
2072 2073 while (mp->b_cont)
2073 2074 mp = mp->b_cont;
2074 2075
2075 2076 tcp->tcp_rcv_last_tail = mp;
2076 2077 tcp->tcp_rcv_cnt += seg_len;
2077 2078 tcp->tcp_rwnd -= seg_len;
2078 2079 }
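/*
 * Illustrative shape of tcp_rcv_list after a few enqueues (orientation
 * only; not from the source). b_next links the list elements, b_cont
 * links the data blocks within an element:
 *
 *	tcp_rcv_list ------> elem1 --b_cont--> mblk --b_cont--> mblk
 *	                       |
 *	                     b_next
 *	                       v
 *	                     elem2 (different db_type)
 *	                       |
 *	                     b_next
 *	                       v
 *	tcp_rcv_last_head -> elem3 --b_cont--> mblk <-- tcp_rcv_last_tail
 *
 * tcp_rcv_last_head points at the head mblk of the final element and
 * tcp_rcv_last_tail at its last b_cont block, where data of the same
 * db_type is appended.
 */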
2079 2080
2080 2081 /* Generate an ACK-only (no data) segment for a TCP endpoint */
2081 2082 mblk_t *
2082 2083 tcp_ack_mp(tcp_t *tcp)
2083 2084 {
2084 2085 uint32_t seq_no;
2085 2086 tcp_stack_t *tcps = tcp->tcp_tcps;
2086 2087 conn_t *connp = tcp->tcp_connp;
2087 2088
2088 2089 /*
2089 2090 * There are a few cases to be considered while setting the sequence no.
2090 2091 * Essentially, we can come here while processing an unacceptable pkt
2091 2092 * in the TCPS_SYN_RCVD state, in which case we set the sequence number
2092 2093 * to snxt (per RFC 793), note the swnd wouldn't have been set yet.
2093 2094 * If we are here for a zero window probe, stick with suna. In all
2094 2095 * other cases, we check if suna + swnd encompasses snxt and set
2095 2096 * the sequence number to snxt, if so. If snxt falls outside the
2096 2097 * window (the receiver probably shrunk its window), we will go with
2097 2098 * suna + swnd, otherwise the sequence no will be unacceptable to the
2098 2099 * receiver.
2099 2100 */
2100 2101 if (tcp->tcp_zero_win_probe) {
2101 2102 seq_no = tcp->tcp_suna;
2102 2103 } else if (tcp->tcp_state == TCPS_SYN_RCVD) {
2103 2104 ASSERT(tcp->tcp_swnd == 0);
2104 2105 seq_no = tcp->tcp_snxt;
2105 2106 } else {
2106 2107 seq_no = SEQ_GT(tcp->tcp_snxt,
2107 2108 (tcp->tcp_suna + tcp->tcp_swnd)) ?
2108 2109 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt;
2109 2110 }
2110 2111
2111 2112 if (tcp->tcp_valid_bits) {
2112 2113 /*
2113 2114 * For the complex case where we have to send some
2114 2115 * controls (FIN or SYN), let tcp_xmit_mp do it.
2115 2116 */
2116 2117 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE,
2117 2118 NULL, B_FALSE));
2118 2119 } else {
2119 2120 /* Generate a simple ACK */
2120 2121 int data_length;
2121 2122 uchar_t *rptr;
2122 2123 tcpha_t *tcpha;
2123 2124 mblk_t *mp1;
2124 2125 int32_t total_hdr_len;
2125 2126 int32_t tcp_hdr_len;
2126 2127 int32_t num_sack_blk = 0;
2127 2128 int32_t sack_opt_len;
2128 2129 ip_xmit_attr_t *ixa = connp->conn_ixa;
2129 2130
2130 2131 /*
2131 2132 * Allocate space for TCP + IP headers
2132 2133 * and link-level header
2133 2134 */
2134 2135 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
2135 2136 num_sack_blk = MIN(tcp->tcp_max_sack_blk,
2136 2137 tcp->tcp_num_sack_blk);
2137 2138 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) +
2138 2139 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN;
2139 2140 total_hdr_len = connp->conn_ht_iphc_len + sack_opt_len;
2140 2141 tcp_hdr_len = connp->conn_ht_ulp_len + sack_opt_len;
2141 2142 } else {
2142 2143 total_hdr_len = connp->conn_ht_iphc_len;
2143 2144 tcp_hdr_len = connp->conn_ht_ulp_len;
2144 2145 }
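		/*
		 * Worked sizing example (illustrative): with num_sack_blk = 2
		 * and 8-byte SACK blocks, sack_opt_len = 2 * 8 + 2 NOPs +
		 * 2 (kind + length) = 20 bytes, which keeps the options area
		 * 32-bit aligned; the option's own length byte written below
		 * covers only 2 + 2 * 8 = 18 bytes, excluding the NOPs.
		 */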
2145 2146 mp1 = allocb(total_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED);
2146 2147 if (!mp1)
2147 2148 return (NULL);
2148 2149
2149 2150 /* Update the latest receive window size in TCP header. */
2150 2151 tcp->tcp_tcpha->tha_win =
2151 2152 htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
2152 2153 /* copy in prototype TCP + IP header */
2153 2154 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra;
2154 2155 mp1->b_rptr = rptr;
2155 2156 mp1->b_wptr = rptr + total_hdr_len;
2156 2157 bcopy(connp->conn_ht_iphc, rptr, connp->conn_ht_iphc_len);
2157 2158
2158 2159 tcpha = (tcpha_t *)&rptr[ixa->ixa_ip_hdr_length];
2159 2160
2160 2161 /* Set the TCP sequence number. */
2161 2162 tcpha->tha_seq = htonl(seq_no);
2162 2163
2163 2164 /* Set up the TCP flag field. */
2164 2165 tcpha->tha_flags = (uchar_t)TH_ACK;
2165 2166 if (tcp->tcp_ecn_echo_on)
2166 2167 tcpha->tha_flags |= TH_ECE;
2167 2168
2168 2169 tcp->tcp_rack = tcp->tcp_rnxt;
2169 2170 tcp->tcp_rack_cnt = 0;
2170 2171
2171 2172 /* fill in timestamp option if in use */
2172 2173 if (tcp->tcp_snd_ts_ok) {
2173 2174 uint32_t llbolt = (uint32_t)LBOLT_FASTPATH;
2174 2175
2175 2176 U32_TO_BE32(llbolt,
2176 2177 (char *)tcpha + TCP_MIN_HEADER_LENGTH+4);
2177 2178 U32_TO_BE32(tcp->tcp_ts_recent,
2178 2179 (char *)tcpha + TCP_MIN_HEADER_LENGTH+8);
2179 2180 }
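		/*
		 * Resulting option bytes, assuming the standard RFC 1323
		 * layout in the prototype header (offsets from the start of
		 * the TCP header): 20-21 NOP,NOP; 22 kind = 8; 23 len = 10;
		 * 24-27 TSval (llbolt); 28-31 TSecr (tcp_ts_recent); hence
		 * the +4 and +8 above.
		 */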
2180 2181
2181 2182 /* Fill in SACK options */
2182 2183 if (num_sack_blk > 0) {
2183 2184 uchar_t *wptr = (uchar_t *)tcpha +
2184 2185 connp->conn_ht_ulp_len;
2185 2186 sack_blk_t *tmp;
2186 2187 int32_t i;
2187 2188
2188 2189 wptr[0] = TCPOPT_NOP;
2189 2190 wptr[1] = TCPOPT_NOP;
2190 2191 wptr[2] = TCPOPT_SACK;
2191 2192 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
2192 2193 sizeof (sack_blk_t);
2193 2194 wptr += TCPOPT_REAL_SACK_LEN;
2194 2195
2195 2196 tmp = tcp->tcp_sack_list;
2196 2197 for (i = 0; i < num_sack_blk; i++) {
2197 2198 U32_TO_BE32(tmp[i].begin, wptr);
2198 2199 wptr += sizeof (tcp_seq);
2199 2200 U32_TO_BE32(tmp[i].end, wptr);
2200 2201 wptr += sizeof (tcp_seq);
2201 2202 }
2202 2203 tcpha->tha_offset_and_reserved +=
2203 2204 ((num_sack_blk * 2 + 1) << 4);
2204 2205 }
2205 2206
2206 2207 ixa->ixa_pktlen = total_hdr_len;
2207 2208
2208 2209 if (ixa->ixa_flags & IXAF_IS_IPV4) {
2209 2210 ((ipha_t *)rptr)->ipha_length = htons(total_hdr_len);
2210 2211 } else {
2211 2212 ip6_t *ip6 = (ip6_t *)rptr;
2212 2213
2213 2214 ip6->ip6_plen = htons(total_hdr_len - IPV6_HDR_LEN);
2214 2215 }
2215 2216
2216 2217 /*
2217 2218 * Prime pump for checksum calculation in IP. Include the
2218 2219 * adjustment for a source route if any.
2219 2220 */
2220 2221 data_length = tcp_hdr_len + connp->conn_sum;
2221 2222 data_length = (data_length >> 16) + (data_length & 0xFFFF);
2222 2223 tcpha->tha_sum = htons(data_length);
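		/*
		 * Folding example (illustrative numbers): with tcp_hdr_len =
		 * 32 and conn_sum = 0x1ffff, data_length is 0x2001f and the
		 * fold yields 0x2 + 0x001f = 0x21, a 16-bit partial sum that
		 * IP later extends over the payload.
		 */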
2223 2224
2224 2225 if (tcp->tcp_ip_forward_progress) {
2225 2226 tcp->tcp_ip_forward_progress = B_FALSE;
2226 2227 connp->conn_ixa->ixa_flags |= IXAF_REACH_CONF;
2227 2228 } else {
2228 2229 connp->conn_ixa->ixa_flags &= ~IXAF_REACH_CONF;
2229 2230 }
2230 2231 return (mp1);
2231 2232 }
2232 2233 }
2233 2234
2234 2235 /*
2235 2236 * Dummy socket upcalls for if/when the conn_t gets detached from a
2236 2237 * direct-callback sonode via a user-driven close(). Easy to catch with
2237 2238 * DTrace FBT, and should be mostly harmless.
2238 2239 */
2239 2240
2240 2241 /* ARGSUSED */
2241 2242 static sock_upper_handle_t
2242 2243 tcp_dummy_newconn(sock_upper_handle_t x, sock_lower_handle_t y,
2243 2244 sock_downcalls_t *z, cred_t *cr, pid_t pid, sock_upcalls_t **ignored)
2244 2245 {
2245 2246 ASSERT(0); /* Panic in debug, otherwise ignore. */
2246 2247 return (NULL);
2247 2248 }
2248 2249
2249 2250 /* ARGSUSED */
2250 2251 static void
2251 2252 tcp_dummy_connected(sock_upper_handle_t x, sock_connid_t y, cred_t *cr,
2252 2253 pid_t pid)
2253 2254 {
2254 2255 ASSERT(x == NULL);
2255 2256 /* Normally we'd crhold(cr) and attach it to socket state. */
2256 2257 /* LINTED */
2257 2258 }
2258 2259
2259 2260 /* ARGSUSED */
2260 2261 static int
2261 2262 tcp_dummy_disconnected(sock_upper_handle_t x, sock_connid_t y, int blah)
2262 2263 {
2263 2264 ASSERT(0); /* Panic in debug, otherwise ignore. */
2264 2265 return (-1);
2265 2266 }
2266 2267
2267 2268 /* ARGSUSED */
2268 2269 static void
2269 2270 tcp_dummy_opctl(sock_upper_handle_t x, sock_opctl_action_t y, uintptr_t blah)
2270 2271 {
2271 2272 ASSERT(x == NULL);
2272 2273 /* We really want this one to be a harmless NOP for now. */
2273 2274 /* LINTED */
2274 2275 }
2275 2276
2276 2277 /* ARGSUSED */
2277 2278 static ssize_t
2278 2279 tcp_dummy_recv(sock_upper_handle_t x, mblk_t *mp, size_t len, int flags,
2279 2280 int *error, boolean_t *push)
2280 2281 {
2281 2282 ASSERT(x == NULL);
2282 2283
2283 2284 /*
2284 2285 * Consume the message, set ESHUTDOWN, and return an error.
2285 2286 * Nobody's home!
2286 2287 */
2287 2288 freemsg(mp);
2288 2289 *error = ESHUTDOWN;
2289 2290 return (-1);
2290 2291 }
2291 2292
2292 2293 /* ARGSUSED */
2293 2294 static void
2294 2295 tcp_dummy_set_proto_props(sock_upper_handle_t x, struct sock_proto_props *y)
2295 2296 {
2296 2297 ASSERT(0); /* Panic in debug, otherwise ignore. */
2297 2298 }
2298 2299
2299 2300 /* ARGSUSED */
2300 2301 static void
2301 2302 tcp_dummy_txq_full(sock_upper_handle_t x, boolean_t y)
2302 2303 {
2303 2304 ASSERT(0); /* Panic in debug, otherwise ignore. */
2304 2305 }
2305 2306
2306 2307 /* ARGSUSED */
2307 2308 static void
2308 2309 tcp_dummy_signal_oob(sock_upper_handle_t x, ssize_t len)
2309 2310 {
2310 2311 ASSERT(x == NULL);
2311 2312 /* Otherwise, this would signal socket state about OOB data. */
2312 2313 }
2313 2314
2314 2315 /* ARGSUSED */
2315 2316 static void
2316 2317 tcp_dummy_set_error(sock_upper_handle_t x, int err)
2317 2318 {
2318 2319 ASSERT(0); /* Panic in debug, otherwise ignore. */
2319 2320 }
2320 2321
2321 2322 /* ARGSUSED */
2322 2323 static void
2323 2324 tcp_dummy_onearg(sock_upper_handle_t x)
2324 2325 {
2325 2326 ASSERT(0); /* Panic in debug, otherwise ignore. */
2326 2327 }
2327 2328
2328 2329 static sock_upcalls_t tcp_dummy_upcalls = {
2329 2330 tcp_dummy_newconn,
2330 2331 tcp_dummy_connected,
2331 2332 tcp_dummy_disconnected,
2332 2333 tcp_dummy_opctl,
2333 2334 tcp_dummy_recv,
2334 2335 tcp_dummy_set_proto_props,
2335 2336 tcp_dummy_txq_full,
2336 2337 tcp_dummy_signal_oob,
2337 2338 tcp_dummy_onearg,
2338 2339 tcp_dummy_set_error,
2339 2340 tcp_dummy_onearg
2340 2341 };
2341 2342
2342 2343 /*
2343 2344  * Handle M_DATA messages from IP. It's called directly from IP via
2344 2345 * squeue for received IP packets.
2345 2346 *
2346 2347 * The first argument is always the connp/tcp to which the mp belongs.
2347 2348 * There are no exceptions to this rule. The caller has already put
2348 2349 * a reference on this connp/tcp and once tcp_input_data() returns,
2349 2350 * the squeue will do the refrele.
2350 2351 *
2351 2352  * TH_SYN segments for the listener go directly to tcp_input_listener
2352 2353  * via the squeue. ICMP errors go directly to tcp_icmp_input().
2353 2354 *
2354 2355 * sqp: NULL = recursive, sqp != NULL means called from squeue
2355 2356 */
2356 2357 void
2357 2358 tcp_input_data(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
2358 2359 {
2359 2360 int32_t bytes_acked;
2360 2361 int32_t gap;
2361 2362 mblk_t *mp1;
2362 2363 uint_t flags;
2363 2364 uint32_t new_swnd = 0;
2364 2365 uchar_t *iphdr;
2365 2366 uchar_t *rptr;
2366 2367 int32_t rgap;
2367 2368 uint32_t seg_ack;
2368 2369 int seg_len;
2369 2370 uint_t ip_hdr_len;
2370 2371 uint32_t seg_seq;
2371 2372 tcpha_t *tcpha;
2372 2373 int urp;
2373 2374 tcp_opt_t tcpopt;
2374 2375 ip_pkt_t ipp;
2375 2376 boolean_t ofo_seg = B_FALSE; /* Out of order segment */
2376 2377 uint32_t cwnd;
2377 2378 uint32_t add;
2378 2379 int npkt;
2379 2380 int mss;
2380 2381 conn_t *connp = (conn_t *)arg;
2381 2382 squeue_t *sqp = (squeue_t *)arg2;
2382 2383 tcp_t *tcp = connp->conn_tcp;
2383 2384 tcp_stack_t *tcps = tcp->tcp_tcps;
2384 2385 sock_upcalls_t *sockupcalls;
2385 2386
2386 2387 /*
2387 2388 * RST from fused tcp loopback peer should trigger an unfuse.
2388 2389 */
2389 2390 if (tcp->tcp_fused) {
2390 2391 TCP_STAT(tcps, tcp_fusion_aborted);
2391 2392 tcp_unfuse(tcp);
2392 2393 }
2393 2394
2394 2395 iphdr = mp->b_rptr;
2395 2396 rptr = mp->b_rptr;
2396 2397 ASSERT(OK_32PTR(rptr));
2397 2398
2398 2399 ip_hdr_len = ira->ira_ip_hdr_length;
2399 2400 if (connp->conn_recv_ancillary.crb_all != 0) {
2400 2401 /*
2401 2402 * Record packet information in the ip_pkt_t
2402 2403 */
2403 2404 ipp.ipp_fields = 0;
2404 2405 if (ira->ira_flags & IRAF_IS_IPV4) {
2405 2406 (void) ip_find_hdr_v4((ipha_t *)rptr, &ipp,
2406 2407 B_FALSE);
2407 2408 } else {
2408 2409 uint8_t nexthdrp;
2409 2410
2410 2411 /*
2411 2412 * IPv6 packets can only be received by applications
2412 2413 * that are prepared to receive IPv6 addresses.
2413 2414 * The IP fanout must ensure this.
2414 2415 */
2415 2416 ASSERT(connp->conn_family == AF_INET6);
2416 2417
2417 2418 (void) ip_find_hdr_v6(mp, (ip6_t *)rptr, B_TRUE, &ipp,
2418 2419 &nexthdrp);
2419 2420 ASSERT(nexthdrp == IPPROTO_TCP);
2420 2421
2421 2422 /* Could have caused a pullup? */
2422 2423 iphdr = mp->b_rptr;
2423 2424 rptr = mp->b_rptr;
2424 2425 }
2425 2426 }
2426 2427 ASSERT(DB_TYPE(mp) == M_DATA);
2427 2428 ASSERT(mp->b_next == NULL);
2428 2429
2429 2430 tcpha = (tcpha_t *)&rptr[ip_hdr_len];
2430 2431 seg_seq = ntohl(tcpha->tha_seq);
2431 2432 seg_ack = ntohl(tcpha->tha_ack);
2432 2433 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX);
2433 2434 seg_len = (int)(mp->b_wptr - rptr) -
2434 2435 (ip_hdr_len + TCP_HDR_LENGTH(tcpha));
2435 2436 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) {
2436 2437 do {
2437 2438 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
2438 2439 (uintptr_t)INT_MAX);
2439 2440 seg_len += (int)(mp1->b_wptr - mp1->b_rptr);
2440 2441 } while ((mp1 = mp1->b_cont) != NULL &&
2441 2442 mp1->b_datap->db_type == M_DATA);
2442 2443 }
2443 2444
2444 2445 DTRACE_TCP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa,
2445 2446 __dtrace_tcp_void_ip_t *, iphdr, tcp_t *, tcp,
2446 2447 __dtrace_tcp_tcph_t *, tcpha);
2447 2448
2448 2449 if (tcp->tcp_state == TCPS_TIME_WAIT) {
2449 2450 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack,
2450 2451 seg_len, tcpha, ira);
2451 2452 return;
2452 2453 }
2453 2454
2454 2455 if (sqp != NULL) {
2455 2456 /*
2456 2457 * This is the correct place to update tcp_last_recv_time. Note
2457 2458		 * that it is also updated for tcp structures that belong to the
2458 2459		 * global and listener queues, which do not really need updating.
2459 2460 * But that should not cause any harm. And it is updated for
2460 2461 * all kinds of incoming segments, not only for data segments.
2461 2462 */
2462 2463 tcp->tcp_last_recv_time = LBOLT_FASTPATH;
2463 2464 }
2464 2465
2465 2466 flags = (unsigned int)tcpha->tha_flags & 0xFF;
2466 2467
2467 2468 BUMP_LOCAL(tcp->tcp_ibsegs);
2468 2469 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp);
2469 2470
2470 2471 if ((flags & TH_URG) && sqp != NULL) {
2471 2472 /*
2472 2473 * TCP can't handle urgent pointers that arrive before
2473 2474 * the connection has been accept()ed since it can't
2474 2475 * buffer OOB data. Discard segment if this happens.
2475 2476 *
2476 2477 * We can't just rely on a non-null tcp_listener to indicate
2477 2478 * that the accept() has completed since unlinking of the
2478 2479 * eager and completion of the accept are not atomic.
2479 2480 * tcp_detached, when it is not set (B_FALSE) indicates
2480 2481 * that the accept() has completed.
2481 2482 *
2482 2483 * Nor can it reassemble urgent pointers, so discard
2483 2484 * if it's not the next segment expected.
2484 2485 *
2485 2486 * Otherwise, collapse chain into one mblk (discard if
2486 2487 * that fails). This makes sure the headers, retransmitted
2487 2488 * data, and new data all are in the same mblk.
2488 2489 */
2489 2490 ASSERT(mp != NULL);
2490 2491 if (tcp->tcp_detached || !pullupmsg(mp, -1)) {
2491 2492 freemsg(mp);
2492 2493 return;
2493 2494 }
2494 2495 /* Update pointers into message */
2495 2496 iphdr = rptr = mp->b_rptr;
2496 2497 tcpha = (tcpha_t *)&rptr[ip_hdr_len];
2497 2498 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) {
2498 2499 /*
2499 2500 * Since we can't handle any data with this urgent
2500 2501 * pointer that is out of sequence, we expunge
2501 2502 * the data. This allows us to still register
2502 2503 * the urgent mark and generate the M_PCSIG,
2503 2504 * which we can do.
2504 2505 */
2505 2506 mp->b_wptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha);
2506 2507 seg_len = 0;
2507 2508 }
2508 2509 }
2509 2510
2510 2511 sockupcalls = connp->conn_upcalls;
2511 2512 /* A conn_t may have belonged to a now-closed socket. Be careful. */
2512 2513 if (sockupcalls == NULL)
2513 2514 sockupcalls = &tcp_dummy_upcalls;
2514 2515
2515 2516 switch (tcp->tcp_state) {
2516 2517 case TCPS_SYN_SENT:
2517 2518 if (connp->conn_final_sqp == NULL &&
2518 2519 tcp_outbound_squeue_switch && sqp != NULL) {
2519 2520 ASSERT(connp->conn_initial_sqp == connp->conn_sqp);
2520 2521 connp->conn_final_sqp = sqp;
2521 2522 if (connp->conn_final_sqp != connp->conn_sqp) {
2522 2523 DTRACE_PROBE1(conn__final__sqp__switch,
2523 2524 conn_t *, connp);
2524 2525 CONN_INC_REF(connp);
2525 2526 SQUEUE_SWITCH(connp, connp->conn_final_sqp);
2526 2527 SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
2527 2528 tcp_input_data, connp, ira, ip_squeue_flag,
2528 2529 SQTAG_CONNECT_FINISH);
2529 2530 return;
2530 2531 }
2531 2532 DTRACE_PROBE1(conn__final__sqp__same, conn_t *, connp);
2532 2533 }
2533 2534 if (flags & TH_ACK) {
2534 2535 /*
2535 2536 * Note that our stack cannot send data before a
2536 2537 * connection is established, therefore the
2537 2538 * following check is valid. Otherwise, it has
2538 2539 * to be changed.
2539 2540 */
2540 2541 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) ||
2541 2542 SEQ_GT(seg_ack, tcp->tcp_snxt)) {
2542 2543 freemsg(mp);
2543 2544 if (flags & TH_RST)
2544 2545 return;
2545 2546 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq",
2546 2547 tcp, seg_ack, 0, TH_RST);
2547 2548 return;
2548 2549 }
2549 2550 ASSERT(tcp->tcp_suna + 1 == seg_ack);
2550 2551 }
2551 2552 if (flags & TH_RST) {
2552 2553 if (flags & TH_ACK) {
2553 2554 DTRACE_TCP5(connect__refused, mblk_t *, NULL,
2554 2555 ip_xmit_attr_t *, connp->conn_ixa,
2555 2556 void_ip_t *, iphdr, tcp_t *, tcp,
2556 2557 tcph_t *, tcpha);
2557 2558 (void) tcp_clean_death(tcp, ECONNREFUSED);
2558 2559 }
2559 2560 freemsg(mp);
2560 2561 return;
2561 2562 }
2562 2563 if (!(flags & TH_SYN)) {
2563 2564 freemsg(mp);
2564 2565 return;
2565 2566 }
2566 2567
2567 2568 /* Process all TCP options. */
2568 2569 tcp_process_options(tcp, tcpha);
2569 2570 /*
2570 2571 * The following changes our rwnd to be a multiple of the
2571 2572		 * MIN(peer MSS, our MSS) for performance reasons.
2572 2573 */
2573 2574 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(connp->conn_rcvbuf,
2574 2575 tcp->tcp_mss));
2575 2576
2576 2577 /* Is the other end ECN capable? */
2577 2578 if (tcp->tcp_ecn_ok) {
2578 2579 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) {
2579 2580 tcp->tcp_ecn_ok = B_FALSE;
2580 2581 }
2581 2582 }
2582 2583 /*
2583 2584 * Clear ECN flags because it may interfere with later
2584 2585 * processing.
2585 2586 */
2586 2587 flags &= ~(TH_ECE|TH_CWR);
2587 2588
2588 2589 tcp->tcp_irs = seg_seq;
2589 2590 tcp->tcp_rack = seg_seq;
2590 2591 tcp->tcp_rnxt = seg_seq + 1;
2591 2592 tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt);
2592 2593 if (!TCP_IS_DETACHED(tcp)) {
2593 2594 /* Allocate room for SACK options if needed. */
2594 2595 connp->conn_wroff = connp->conn_ht_iphc_len;
2595 2596 if (tcp->tcp_snd_sack_ok)
2596 2597 connp->conn_wroff += TCPOPT_MAX_SACK_LEN;
2597 2598 if (!tcp->tcp_loopback)
2598 2599 connp->conn_wroff += tcps->tcps_wroff_xtra;
2599 2600
2600 2601 (void) proto_set_tx_wroff(connp->conn_rq, connp,
2601 2602 connp->conn_wroff);
2602 2603 }
2603 2604 if (flags & TH_ACK) {
2604 2605 /*
2605 2606 * If we can't get the confirmation upstream, pretend
2606 2607 * we didn't even see this one.
2607 2608 *
2608 2609 * XXX: how can we pretend we didn't see it if we
2609 2610			 * have updated rnxt et al.?
2610 2611 *
2611 2612 * For loopback we defer sending up the T_CONN_CON
2612 2613 * until after some checks below.
2613 2614 */
2614 2615 mp1 = NULL;
2615 2616 /*
2616 2617 * tcp_sendmsg() checks tcp_state without entering
2617 2618 * the squeue so tcp_state should be updated before
2618 2619 * sending up connection confirmation. Probe the
2619 2620 * state change below when we are sure the connection
2620 2621 * confirmation has been sent.
2621 2622 */
2622 2623 tcp->tcp_state = TCPS_ESTABLISHED;
2623 2624 if (!tcp_conn_con(tcp, iphdr, mp,
2624 2625 tcp->tcp_loopback ? &mp1 : NULL, ira)) {
2625 2626 tcp->tcp_state = TCPS_SYN_SENT;
2626 2627 freemsg(mp);
2627 2628 return;
2628 2629 }
2629 2630 TCPS_CONN_INC(tcps);
2630 2631 /* SYN was acked - making progress */
2631 2632 tcp->tcp_ip_forward_progress = B_TRUE;
2632 2633
2633 2634 /* One for the SYN */
2634 2635 tcp->tcp_suna = tcp->tcp_iss + 1;
2635 2636 tcp->tcp_valid_bits &= ~TCP_ISS_VALID;
2636 2637
2637 2638 /*
2638 2639 * If SYN was retransmitted, need to reset all
2639 2640 * retransmission info. This is because this
2640 2641 * segment will be treated as a dup ACK.
2641 2642 */
2642 2643 if (tcp->tcp_rexmit) {
2643 2644 tcp->tcp_rexmit = B_FALSE;
2644 2645 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
2645 2646 tcp->tcp_rexmit_max = tcp->tcp_snxt;
2646 2647 tcp->tcp_snd_burst = tcp->tcp_localnet ?
2647 2648 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
2648 2649 tcp->tcp_ms_we_have_waited = 0;
2649 2650
2650 2651 /*
2651 2652 * Set tcp_cwnd back to 1 MSS, per
2652 2653 * recommendation from
2653 2654 * draft-floyd-incr-init-win-01.txt,
2654 2655 * Increasing TCP's Initial Window.
2655 2656 */
2656 2657 tcp->tcp_cwnd = tcp->tcp_mss;
2657 2658 }
2658 2659
2659 2660 tcp->tcp_swl1 = seg_seq;
2660 2661 tcp->tcp_swl2 = seg_ack;
2661 2662
2662 2663 new_swnd = ntohs(tcpha->tha_win);
2663 2664 tcp->tcp_swnd = new_swnd;
2664 2665 if (new_swnd > tcp->tcp_max_swnd)
2665 2666 tcp->tcp_max_swnd = new_swnd;
2666 2667
2667 2668 /*
2668 2669 * Always send the three-way handshake ack immediately
2669 2670 * in order to make the connection complete as soon as
2670 2671 * possible on the accepting host.
2671 2672 */
2672 2673 flags |= TH_ACK_NEEDED;
2673 2674
2674 2675 /*
2675 2676 * Trace connect-established here.
2676 2677 */
2677 2678 DTRACE_TCP5(connect__established, mblk_t *, NULL,
2678 2679 ip_xmit_attr_t *, tcp->tcp_connp->conn_ixa,
2679 2680 void_ip_t *, iphdr, tcp_t *, tcp, tcph_t *, tcpha);
2680 2681
2681 2682 /* Trace change from SYN_SENT -> ESTABLISHED here */
2682 2683 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
2683 2684 connp->conn_ixa, void, NULL, tcp_t *, tcp,
2684 2685 void, NULL, int32_t, TCPS_SYN_SENT);
2685 2686
2686 2687 /*
2687 2688 * Special case for loopback. At this point we have
2688 2689 * received SYN-ACK from the remote endpoint. In
2689 2690 * order to ensure that both endpoints reach the
2690 2691 * fused state prior to any data exchange, the final
2691 2692 * ACK needs to be sent before we indicate T_CONN_CON
2692 2693 * to the module upstream.
2693 2694 */
2694 2695 if (tcp->tcp_loopback) {
2695 2696 mblk_t *ack_mp;
2696 2697
2697 2698 ASSERT(!tcp->tcp_unfusable);
2698 2699 ASSERT(mp1 != NULL);
2699 2700 /*
2700 2701 * For loopback, we always get a pure SYN-ACK
2701 2702 * and only need to send back the final ACK
2702 2703 * with no data (this is because the other
2703 2704 * tcp is ours and we don't do T/TCP). This
2704 2705 * final ACK triggers the passive side to
2705 2706 * perform fusion in ESTABLISHED state.
2706 2707 */
2707 2708 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) {
2708 2709 if (tcp->tcp_ack_tid != 0) {
2709 2710 (void) TCP_TIMER_CANCEL(tcp,
2710 2711 tcp->tcp_ack_tid);
2711 2712 tcp->tcp_ack_tid = 0;
2712 2713 }
2713 2714 tcp_send_data(tcp, ack_mp);
2714 2715 BUMP_LOCAL(tcp->tcp_obsegs);
2715 2716 TCPS_BUMP_MIB(tcps, tcpOutAck);
2716 2717
2717 2718 if (!IPCL_IS_NONSTR(connp)) {
2718 2719 /* Send up T_CONN_CON */
2719 2720 if (ira->ira_cred != NULL) {
2720 2721 mblk_setcred(mp1,
2721 2722 ira->ira_cred,
2722 2723 ira->ira_cpid);
2723 2724 }
2724 2725 putnext(connp->conn_rq, mp1);
2725 2726 } else {
2726 2727 (*sockupcalls->su_connected)
2727 2728 (connp->conn_upper_handle,
2728 2729 tcp->tcp_connid,
2729 2730 ira->ira_cred,
2730 2731 ira->ira_cpid);
2731 2732 freemsg(mp1);
2732 2733 }
2733 2734
2734 2735 freemsg(mp);
2735 2736 return;
2736 2737 }
2737 2738 /*
2738 2739 * Forget fusion; we need to handle more
2739 2740 * complex cases below. Send the deferred
2740 2741 * T_CONN_CON message upstream and proceed
2741 2742 * as usual. Mark this tcp as not capable
2742 2743 * of fusion.
2743 2744 */
2744 2745 TCP_STAT(tcps, tcp_fusion_unfusable);
2745 2746 tcp->tcp_unfusable = B_TRUE;
2746 2747 if (!IPCL_IS_NONSTR(connp)) {
2747 2748 if (ira->ira_cred != NULL) {
2748 2749 mblk_setcred(mp1, ira->ira_cred,
2749 2750 ira->ira_cpid);
2750 2751 }
2751 2752 putnext(connp->conn_rq, mp1);
2752 2753 } else {
2753 2754 (*sockupcalls->su_connected)
2754 2755 (connp->conn_upper_handle,
2755 2756 tcp->tcp_connid, ira->ira_cred,
2756 2757 ira->ira_cpid);
2757 2758 freemsg(mp1);
2758 2759 }
2759 2760 }
2760 2761
2761 2762 /*
2762 2763 * Check to see if there is data to be sent. If
2763 2764 * yes, set the transmit flag. Then check to see
2764 2765 * if received data processing needs to be done.
2765 2766 * If not, go straight to xmit_check. This short
2766 2767 * cut is OK as we don't support T/TCP.
2767 2768 */
2768 2769 if (tcp->tcp_unsent)
2769 2770 flags |= TH_XMIT_NEEDED;
2770 2771
2771 2772 if (seg_len == 0 && !(flags & TH_URG)) {
2772 2773 freemsg(mp);
2773 2774 goto xmit_check;
2774 2775 }
2775 2776
2776 2777 flags &= ~TH_SYN;
2777 2778 seg_seq++;
2778 2779 break;
2779 2780 }
2780 2781 tcp->tcp_state = TCPS_SYN_RCVD;
2781 2782 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
2782 2783 connp->conn_ixa, void_ip_t *, NULL, tcp_t *, tcp,
2783 2784 tcph_t *, NULL, int32_t, TCPS_SYN_SENT);
2784 2785 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss,
2785 2786 NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
2786 2787 if (mp1 != NULL) {
2787 2788 tcp_send_data(tcp, mp1);
2788 2789 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
2789 2790 }
2790 2791 freemsg(mp);
2791 2792 return;
2792 2793 case TCPS_SYN_RCVD:
2793 2794 if (flags & TH_ACK) {
2794 2795 uint32_t pinit_wnd;
2795 2796
2796 2797 /*
2797 2798			 * In this state a SYN|ACK packet is either bogus
2798 2799			 * (the other side must be ACKing our SYN, which
2799 2800			 * indicates it has seen the ACK for their SYN and
2800 2801			 * shouldn't retransmit it) or we're crossing SYNs
2801 2802			 * on an active open.
2802 2803 */
2803 2804 if ((flags & TH_SYN) && !tcp->tcp_active_open) {
2804 2805 freemsg(mp);
2805 2806 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn",
2806 2807 tcp, seg_ack, 0, TH_RST);
2807 2808 return;
2808 2809 }
2809 2810 /*
2810 2811 * NOTE: RFC 793 pg. 72 says this should be
2811 2812 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt
2812 2813 * but that would mean we have an ack that ignored
2813 2814 * our SYN.
2814 2815 */
2815 2816 if (SEQ_LEQ(seg_ack, tcp->tcp_suna) ||
2816 2817 SEQ_GT(seg_ack, tcp->tcp_snxt)) {
2817 2818 freemsg(mp);
2818 2819 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack",
2819 2820 tcp, seg_ack, 0, TH_RST);
2820 2821 return;
2821 2822 }
2822 2823 /*
2823 2824 * No sane TCP stack will send such a small window
2824 2825 * without receiving any data. Just drop this invalid
2825 2826 * ACK. We also shorten the abort timeout in case
2826 2827 * this is an attack.
2827 2828 */
2828 2829 pinit_wnd = ntohs(tcpha->tha_win) << tcp->tcp_snd_ws;
2829 2830 if (pinit_wnd < tcp->tcp_mss &&
2830 2831 pinit_wnd < tcp_init_wnd_chk) {
2831 2832 freemsg(mp);
2832 2833 TCP_STAT(tcps, tcp_zwin_ack_syn);
2833 2834 tcp->tcp_second_ctimer_threshold =
2834 2835 tcp_early_abort * SECONDS;
2835 2836 return;
2836 2837 }
2837 2838 }
2838 2839 break;
2839 2840 case TCPS_LISTEN:
2840 2841 /*
2841 2842		 * Only a TLI listener can come through this path when an
2842 2843		 * acceptor is going back to being a listener and a packet
2843 2844		 * for the acceptor hits the classifier. For a socket
2844 2845		 * listener, this can never happen because a listener
2845 2846		 * can never accept a connection on itself, and hence a
2846 2847		 * socket acceptor cannot go back to being a listener.
2847 2848 */
2848 2849 ASSERT(!TCP_IS_SOCKET(tcp));
2849 2850 /*FALLTHRU*/
2850 2851 case TCPS_CLOSED:
2851 2852 case TCPS_BOUND: {
2852 2853 conn_t *new_connp;
2853 2854 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
2854 2855
2855 2856 /*
2856 2857 * Don't accept any input on a closed tcp as this TCP logically
2857 2858 * does not exist on the system. Don't proceed further with
2858 2859 * this TCP. For instance, this packet could trigger another
2859 2860 * close of this tcp which would be disastrous for tcp_refcnt.
2860 2861 * tcp_close_detached / tcp_clean_death / tcp_closei_local must
2861 2862 * be called at most once on a TCP. In this case we need to
2862 2863 * refeed the packet into the classifier and figure out where
2863 2864 * the packet should go.
2864 2865 */
2865 2866 new_connp = ipcl_classify(mp, ira, ipst);
2866 2867 if (new_connp != NULL) {
2867 2868 /* Drops ref on new_connp */
2868 2869 tcp_reinput(new_connp, mp, ira, ipst);
2869 2870 return;
2870 2871 }
2871 2872 /* We failed to classify. For now just drop the packet */
2872 2873 freemsg(mp);
2873 2874 return;
2874 2875 }
2875 2876 case TCPS_IDLE:
2876 2877 /*
2877 2878 * Handle the case where the tcp_clean_death() has happened
2878 2879 * on a connection (application hasn't closed yet) but a packet
2879 2880 * was already queued on squeue before tcp_clean_death()
2880 2881		 * was processed. Calling tcp_clean_death() twice on the same
2881 2882 * connection can result in weird behaviour.
2882 2883 */
2883 2884 freemsg(mp);
2884 2885 return;
2885 2886 default:
2886 2887 break;
2887 2888 }
2888 2889
2889 2890 /*
2890 2891 * Already on the correct queue/perimeter.
2891 2892 * If this is a detached connection and not an eager
2892 2893 * connection hanging off a listener then new data
2893 2894 * (past the FIN) will cause a reset.
2894 2895	 * We do this special check here, out of
2895 2896	 * the main line, rather than checking
2896 2897	 * whether we are detached every time we
2897 2898	 * see new data down below.
2898 2899 */
2899 2900 if (TCP_IS_DETACHED_NONEAGER(tcp) &&
2900 2901 (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) {
2901 2902 TCPS_BUMP_MIB(tcps, tcpInClosed);
2902 2903 DTRACE_PROBE2(tcp__trace__recv, mblk_t *, mp, tcp_t *, tcp);
2903 2904 freemsg(mp);
2904 2905 tcp_xmit_ctl("new data when detached", tcp,
2905 2906 tcp->tcp_snxt, 0, TH_RST);
2906 2907 (void) tcp_clean_death(tcp, EPROTO);
2907 2908 return;
2908 2909 }
2909 2910
2910 2911 mp->b_rptr = (uchar_t *)tcpha + TCP_HDR_LENGTH(tcpha);
2911 2912 urp = ntohs(tcpha->tha_urp) - TCP_OLD_URP_INTERPRETATION;
2912 2913 new_swnd = ntohs(tcpha->tha_win) <<
2913 2914 ((tcpha->tha_flags & TH_SYN) ? 0 : tcp->tcp_snd_ws);
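	/*
	 * Example (illustrative): a raw tha_win of 4096 with tcp_snd_ws = 7
	 * gives new_swnd = 4096 << 7 = 524288 bytes. Windows carried on SYN
	 * segments are never scaled (RFC 1323), hence the TH_SYN test above.
	 */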
2914 2915
2915 2916 if (tcp->tcp_snd_ts_ok) {
2916 2917 if (!tcp_paws_check(tcp, tcpha, &tcpopt)) {
2917 2918 /*
2918 2919 * This segment is not acceptable.
2919 2920 * Drop it and send back an ACK.
2920 2921 */
2921 2922 freemsg(mp);
2922 2923 flags |= TH_ACK_NEEDED;
2923 2924 goto ack_check;
2924 2925 }
2925 2926 } else if (tcp->tcp_snd_sack_ok) {
2926 2927 tcpopt.tcp = tcp;
2927 2928 /*
2928 2929		 * SACK info is already updated in tcp_parse_options. Ignore
2929 2930 * all other TCP options...
2930 2931 */
2931 2932 (void) tcp_parse_options(tcpha, &tcpopt);
2932 2933 }
2933 2934 try_again:;
2934 2935 mss = tcp->tcp_mss;
2935 2936 gap = seg_seq - tcp->tcp_rnxt;
2936 2937 rgap = tcp->tcp_rwnd - (gap + seg_len);
2937 2938 /*
2938 2939 * gap is the amount of sequence space between what we expect to see
2939 2940 * and what we got for seg_seq. A positive value for gap means
2940 2941 * something got lost. A negative value means we got some old stuff.
2941 2942 */
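	/*
	 * Worked example (illustrative): rnxt = 1000, rwnd = 2000, and a
	 * segment arrives with seg_seq = 900, seg_len = 300. Then gap =
	 * 900 - 1000 = -100 (100 bytes already received) and rgap =
	 * 2000 - (-100 + 300) = 1800, so after the gap < 0 path trims the
	 * duplicate bytes, 200 new in-window bytes remain. A negative rgap
	 * would instead mean bytes beyond our advertised window.
	 */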
2942 2943 if (gap < 0) {
2943 2944 /* Old stuff present. Is the SYN in there? */
2944 2945 if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) &&
2945 2946 (seg_len != 0)) {
2946 2947 flags &= ~TH_SYN;
2947 2948 seg_seq++;
2948 2949 urp--;
2949 2950 /* Recompute the gaps after noting the SYN. */
2950 2951 goto try_again;
2951 2952 }
2952 2953 TCPS_BUMP_MIB(tcps, tcpInDataDupSegs);
2953 2954 TCPS_UPDATE_MIB(tcps, tcpInDataDupBytes,
2954 2955 (seg_len > -gap ? -gap : seg_len));
2955 2956 /* Remove the old stuff from seg_len. */
2956 2957 seg_len += gap;
2957 2958 /*
2958 2959 * Anything left?
2959 2960 * Make sure to check for unack'd FIN when rest of data
2960 2961 * has been previously ack'd.
2961 2962 */
2962 2963 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) {
2963 2964 /*
2964 2965 * Resets are only valid if they lie within our offered
2965 2966 * window. If the RST bit is set, we just ignore this
2966 2967 * segment.
2967 2968 */
2968 2969 if (flags & TH_RST) {
2969 2970 freemsg(mp);
2970 2971 return;
2971 2972 }
2972 2973
2973 2974 /*
2974 2975			 * The arrival of dup data packets indicates that we
2975 2976 * may have postponed an ack for too long, or the other
2976 2977 * side's RTT estimate is out of shape. Start acking
2977 2978 * more often.
2978 2979 */
2979 2980 if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) &&
2980 2981 tcp->tcp_rack_cnt >= 1 &&
2981 2982 tcp->tcp_rack_abs_max > 2) {
2982 2983 tcp->tcp_rack_abs_max--;
2983 2984 }
2984 2985 tcp->tcp_rack_cur_max = 1;
2985 2986
2986 2987 /*
2987 2988 * This segment is "unacceptable". None of its
2988 2989			 * sequence space lies within our advertised window.
2989 2990 *
2990 2991 * Adjust seg_len to the original value for tracing.
2991 2992 */
2992 2993 seg_len -= gap;
2993 2994 if (connp->conn_debug) {
2994 2995 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
2995 2996 "tcp_rput: unacceptable, gap %d, rgap %d, "
2996 2997 "flags 0x%x, seg_seq %u, seg_ack %u, "
2997 2998 "seg_len %d, rnxt %u, snxt %u, %s",
2998 2999 gap, rgap, flags, seg_seq, seg_ack,
2999 3000 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt,
3000 3001 tcp_display(tcp, NULL,
3001 3002 DISP_ADDR_AND_PORT));
3002 3003 }
3003 3004
3004 3005 /*
3005 3006 * Arrange to send an ACK in response to the
3006 3007 * unacceptable segment per RFC 793 page 69. There
3007 3008 * is only one small difference between ours and the
3008 3009 * acceptability test in the RFC - we accept ACK-only
3009 3010 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK
3010 3011 * will be generated.
3011 3012 *
3012 3013 * Note that we have to ACK an ACK-only packet at least
3013 3014 * for stacks that send 0-length keep-alives with
3014 3015 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122,
3015 3016 * section 4.2.3.6. As long as we don't ever generate
3016 3017 * an unacceptable packet in response to an incoming
3017 3018 * packet that is unacceptable, it should not cause
3018 3019 * "ACK wars".
3019 3020 */
3020 3021 flags |= TH_ACK_NEEDED;
3021 3022
3022 3023 /*
3023 3024 * Continue processing this segment in order to use the
3024 3025 * ACK information it contains, but skip all other
3025 3026 * sequence-number processing. Processing the ACK
3026 3027 * information is necessary in order to
3027 3028 * re-synchronize connections that may have lost
3028 3029 * synchronization.
3029 3030 *
3030 3031 * We clear seg_len and flag fields related to
3031 3032 * sequence number processing as they are not
3032 3033 * to be trusted for an unacceptable segment.
3033 3034 */
3034 3035 seg_len = 0;
3035 3036 flags &= ~(TH_SYN | TH_FIN | TH_URG);
3036 3037 goto process_ack;
3037 3038 }
3038 3039
3039 3040 /* Fix seg_seq, and chew the gap off the front. */
3040 3041 seg_seq = tcp->tcp_rnxt;
3041 3042 urp += gap;
3042 3043 do {
3043 3044 mblk_t *mp2;
3044 3045 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
3045 3046 (uintptr_t)UINT_MAX);
3046 3047 gap += (uint_t)(mp->b_wptr - mp->b_rptr);
3047 3048 if (gap > 0) {
3048 3049 mp->b_rptr = mp->b_wptr - gap;
3049 3050 break;
3050 3051 }
3051 3052 mp2 = mp;
3052 3053 mp = mp->b_cont;
3053 3054 freeb(mp2);
3054 3055 } while (gap < 0);
3055 3056 /*
3056 3057 * If the urgent data has already been acknowledged, we
3057 3058 * should ignore TH_URG below
3058 3059 */
3059 3060 if (urp < 0)
3060 3061 flags &= ~TH_URG;
3061 3062 }
3062 3063 /*
3063 3064	 * rgap is the amount of window space remaining after this segment;
3064 3065	 * a negative value means that many bytes lie beyond our window.
3065 3066 */
3066 3067 if (rgap < 0) {
3067 3068 mblk_t *mp2;
3068 3069
3069 3070 if (tcp->tcp_rwnd == 0) {
3070 3071 TCPS_BUMP_MIB(tcps, tcpInWinProbe);
3071 3072 } else {
3072 3073 TCPS_BUMP_MIB(tcps, tcpInDataPastWinSegs);
3073 3074 TCPS_UPDATE_MIB(tcps, tcpInDataPastWinBytes, -rgap);
3074 3075 }
3075 3076
3076 3077 /*
3077 3078 * seg_len does not include the FIN, so if more than
3078 3079 * just the FIN is out of window, we act like we don't
3079 3080 * see it. (If just the FIN is out of window, rgap
3080 3081 * will be zero and we will go ahead and acknowledge
3081 3082 * the FIN.)
3082 3083 */
3083 3084 flags &= ~TH_FIN;
3084 3085
3085 3086 /* Fix seg_len and make sure there is something left. */
3086 3087 seg_len += rgap;
3087 3088 if (seg_len <= 0) {
3088 3089 /*
3089 3090 * Resets are only valid if they lie within our offered
3090 3091 * window. If the RST bit is set, we just ignore this
3091 3092 * segment.
3092 3093 */
3093 3094 if (flags & TH_RST) {
3094 3095 freemsg(mp);
3095 3096 return;
3096 3097 }
3097 3098
3098 3099 /* Per RFC 793, we need to send back an ACK. */
3099 3100 flags |= TH_ACK_NEEDED;
3100 3101
3101 3102 /*
3102 3103 * Send SIGURG as soon as possible i.e. even
3103 3104 * if the TH_URG was delivered in a window probe
3104 3105 * packet (which will be unacceptable).
3105 3106 *
3106 3107 * We generate a signal if none has been generated
3107 3108 * for this connection or if this is a new urgent
3108 3109 * byte. Also send a zero-length "unmarked" message
3109 3110 * to inform SIOCATMARK that this is not the mark.
3110 3111 *
3111 3112 * tcp_urp_last_valid is cleared when the T_exdata_ind
3112 3113 * is sent up. This plus the check for old data
3113 3114 * (gap >= 0) handles the wraparound of the sequence
3114 3115 * number space without having to always track the
3115 3116 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks
3116 3117 * this max in its rcv_up variable).
3117 3118 *
3118 3119 			 * This prevents duplicate SIGURGs due to a "late"
3119 3120 * zero-window probe when the T_EXDATA_IND has already
3120 3121 * been sent up.
3121 3122 */
3122 3123 if ((flags & TH_URG) &&
3123 3124 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq,
3124 3125 tcp->tcp_urp_last))) {
3125 3126 if (IPCL_IS_NONSTR(connp)) {
3126 3127 if (!TCP_IS_DETACHED(tcp)) {
3127 3128 (*sockupcalls->su_signal_oob)
3128 3129 (connp->conn_upper_handle,
3129 3130 urp);
3130 3131 }
3131 3132 } else {
3132 3133 mp1 = allocb(0, BPRI_MED);
3133 3134 if (mp1 == NULL) {
3134 3135 freemsg(mp);
3135 3136 return;
3136 3137 }
3137 3138 if (!TCP_IS_DETACHED(tcp) &&
3138 3139 !putnextctl1(connp->conn_rq,
3139 3140 M_PCSIG, SIGURG)) {
3140 3141 /* Try again on the rexmit. */
3141 3142 freemsg(mp1);
3142 3143 freemsg(mp);
3143 3144 return;
3144 3145 }
3145 3146 /*
3146 3147 * If the next byte would be the mark
3147 3148 * then mark with MARKNEXT else mark
3148 3149 * with NOTMARKNEXT.
3149 3150 */
3150 3151 if (gap == 0 && urp == 0)
3151 3152 mp1->b_flag |= MSGMARKNEXT;
3152 3153 else
3153 3154 mp1->b_flag |= MSGNOTMARKNEXT;
3154 3155 freemsg(tcp->tcp_urp_mark_mp);
3155 3156 tcp->tcp_urp_mark_mp = mp1;
3156 3157 flags |= TH_SEND_URP_MARK;
3157 3158 }
3158 3159 tcp->tcp_urp_last_valid = B_TRUE;
3159 3160 tcp->tcp_urp_last = urp + seg_seq;
3160 3161 }
3161 3162 /*
3162 3163 * If this is a zero window probe, continue to
3163 3164 * process the ACK part. But we need to set seg_len
3164 3165 * to 0 to avoid data processing. Otherwise just
3165 3166 * drop the segment and send back an ACK.
3166 3167 */
3167 3168 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) {
3168 3169 flags &= ~(TH_SYN | TH_URG);
3169 3170 seg_len = 0;
3170 3171 goto process_ack;
3171 3172 } else {
3172 3173 freemsg(mp);
3173 3174 goto ack_check;
3174 3175 }
3175 3176 }
3176 3177 /* Pitch out of window stuff off the end. */
3177 3178 rgap = seg_len;
3178 3179 mp2 = mp;
3179 3180 do {
3180 3181 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <=
3181 3182 (uintptr_t)INT_MAX);
3182 3183 rgap -= (int)(mp2->b_wptr - mp2->b_rptr);
3183 3184 if (rgap < 0) {
3184 3185 mp2->b_wptr += rgap;
3185 3186 if ((mp1 = mp2->b_cont) != NULL) {
3186 3187 mp2->b_cont = NULL;
3187 3188 freemsg(mp1);
3188 3189 }
3189 3190 break;
3190 3191 }
3191 3192 } while ((mp2 = mp2->b_cont) != NULL);
3192 3193 }
3193 3194 ok:;
3194 3195 /*
3195 3196 * TCP should check ECN info for segments inside the window only.
3196 3197 * Therefore the check should be done here.
3197 3198 */
3198 3199 if (tcp->tcp_ecn_ok) {
3199 3200 if (flags & TH_CWR) {
3200 3201 tcp->tcp_ecn_echo_on = B_FALSE;
3201 3202 }
3202 3203 /*
3203 3204 * Note that both ECN_CE and CWR can be set in the
3204 3205 * same segment. In this case, we once again turn
3205 3206 * on ECN_ECHO.
3206 3207 */
3207 3208 if (connp->conn_ipversion == IPV4_VERSION) {
3208 3209 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service;
3209 3210
3210 3211 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) {
3211 3212 tcp->tcp_ecn_echo_on = B_TRUE;
3212 3213 }
3213 3214 } else {
3214 3215 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf;
3215 3216
3216 3217 if ((vcf & htonl(IPH_ECN_CE << 20)) ==
3217 3218 htonl(IPH_ECN_CE << 20)) {
3218 3219 tcp->tcp_ecn_echo_on = B_TRUE;
3219 3220 }
3220 3221 }
3221 3222 }
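
For reference, a compact sketch of the two CE tests above; ECN_CE here is a local stand-in for the kernel's IPH_ECN_CE (0x3), and the 20-bit shift is where the traffic class sits in the IPv6 version/class/flow word:

	#include <stdint.h>
	#include <arpa/inet.h>

	#define	ECN_CE	0x3	/* Congestion Experienced (stand-in for IPH_ECN_CE) */

	/* IPv4: the ECN field is the low two bits of the TOS byte. */
	static int
	ipv4_ce_seen(uint8_t tos)
	{
		return ((tos & ECN_CE) == ECN_CE);
	}

	/* IPv6: traffic class occupies bits 20-27 of ip6_vcf (network order). */
	static int
	ipv6_ce_seen(uint32_t vcf)
	{
		return ((vcf & htonl(ECN_CE << 20)) == htonl(ECN_CE << 20));
	}
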
3222 3223
3223 3224 /*
3224 3225 * Check whether we can update tcp_ts_recent. This test is
3225 3226 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP
3226 3227 * Extensions for High Performance: An Update", Internet Draft.
3227 3228 */
3228 3229 if (tcp->tcp_snd_ts_ok &&
3229 3230 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
3230 3231 SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
3231 3232 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
3232 3233 tcp->tcp_last_rcv_lbolt = LBOLT_FASTPATH64;
3233 3234 }
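
A sketch of the update rule just applied, assuming the usual signed-difference TSTMP_GEQ/SEQ_LEQ definitions (names here are local): a timestamp is taken only from a segment that is no older than the stored one and that starts at or before the highest ACK we have sent, so an old or out-of-order segment never moves ts_recent backward.

	#include <stdint.h>

	#define	TSTMP_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)
	#define	SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)

	struct ts_state {
		uint32_t	ts_recent;	/* last timestamp accepted */
		uint32_t	rack;		/* highest ACK we have sent */
	};

	static void
	ts_recent_update(struct ts_state *st, uint32_t ts_val, uint32_t seg_seq)
	{
		if (TSTMP_GEQ(ts_val, st->ts_recent) &&
		    SEQ_LEQ(seg_seq, st->rack))
			st->ts_recent = ts_val;
	}
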
3234 3235
3235 3236 if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) {
3236 3237 /*
3237 3238 * FIN in an out of order segment. We record this in
3238 3239 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq.
3239 3240 * Clear the FIN so that any check on FIN flag will fail.
3240 3241 * Remember that FIN also counts in the sequence number
3241 3242 * space. So we need to ack out of order FIN only segments.
3242 3243 */
3243 3244 if (flags & TH_FIN) {
3244 3245 tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID;
3245 3246 tcp->tcp_ofo_fin_seq = seg_seq + seg_len;
3246 3247 flags &= ~TH_FIN;
3247 3248 flags |= TH_ACK_NEEDED;
3248 3249 }
3249 3250 if (seg_len > 0) {
3250 3251 /* Fill in the SACK blk list. */
3251 3252 if (tcp->tcp_snd_sack_ok) {
3252 3253 tcp_sack_insert(tcp->tcp_sack_list,
3253 3254 seg_seq, seg_seq + seg_len,
3254 3255 &(tcp->tcp_num_sack_blk));
3255 3256 }
3256 3257
3257 3258 /*
3258 3259 * Attempt reassembly and see if we have something
3259 3260 * ready to go.
3260 3261 */
3261 3262 mp = tcp_reass(tcp, mp, seg_seq);
3262 3263 /* Always ack out of order packets */
3263 3264 flags |= TH_ACK_NEEDED | TH_PUSH;
3264 3265 if (mp) {
3265 3266 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
3266 3267 (uintptr_t)INT_MAX);
3267 3268 seg_len = mp->b_cont ? msgdsize(mp) :
3268 3269 (int)(mp->b_wptr - mp->b_rptr);
3269 3270 seg_seq = tcp->tcp_rnxt;
3270 3271 /*
3271 3272 				 * If a gap is filled and the seq num and len
3272 3273 * of the gap match that of a previously
3273 3274 * received FIN, put the FIN flag back in.
3274 3275 */
3275 3276 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) &&
3276 3277 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) {
3277 3278 flags |= TH_FIN;
3278 3279 tcp->tcp_valid_bits &=
3279 3280 ~TCP_OFO_FIN_VALID;
3280 3281 }
3281 3282 if (tcp->tcp_reass_tid != 0) {
3282 3283 (void) TCP_TIMER_CANCEL(tcp,
3283 3284 tcp->tcp_reass_tid);
3284 3285 /*
3285 3286 * Restart the timer if there is still
3286 3287 * data in the reassembly queue.
3287 3288 */
3288 3289 if (tcp->tcp_reass_head != NULL) {
3289 3290 tcp->tcp_reass_tid = TCP_TIMER(
3290 3291 tcp, tcp_reass_timer,
3291 3292 tcps->tcps_reass_timeout);
3292 3293 } else {
3293 3294 tcp->tcp_reass_tid = 0;
3294 3295 }
3295 3296 }
3296 3297 } else {
3297 3298 /*
3298 3299 * Keep going even with NULL mp.
3299 3300 * There may be a useful ACK or something else
3300 3301 * we don't want to miss.
3301 3302 *
3302 3303 * But TCP should not perform fast retransmit
3303 3304 * because of the ack number. TCP uses
3304 3305 * seg_len == 0 to determine if it is a pure
3305 3306 * ACK. And this is not a pure ACK.
3306 3307 */
3307 3308 seg_len = 0;
3308 3309 ofo_seg = B_TRUE;
3309 3310
3310 3311 if (tcps->tcps_reass_timeout != 0 &&
3311 3312 tcp->tcp_reass_tid == 0) {
3312 3313 tcp->tcp_reass_tid = TCP_TIMER(tcp,
3313 3314 tcp_reass_timer,
3314 3315 tcps->tcps_reass_timeout);
3315 3316 }
3316 3317 }
3317 3318 }
3318 3319 } else if (seg_len > 0) {
3319 3320 TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs);
3320 3321 TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, seg_len);
3321 3322 /*
3322 3323 * If an out of order FIN was received before, and the seq
3323 3324 * num and len of the new segment match that of the FIN,
3324 3325 * put the FIN flag back in.
3325 3326 */
3326 3327 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) &&
3327 3328 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) {
3328 3329 flags |= TH_FIN;
3329 3330 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID;
3330 3331 }
3331 3332 }
3332 3333 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) {
3333 3334 if (flags & TH_RST) {
3334 3335 freemsg(mp);
3335 3336 switch (tcp->tcp_state) {
3336 3337 case TCPS_SYN_RCVD:
3337 3338 (void) tcp_clean_death(tcp, ECONNREFUSED);
3338 3339 break;
3339 3340 case TCPS_ESTABLISHED:
3340 3341 case TCPS_FIN_WAIT_1:
3341 3342 case TCPS_FIN_WAIT_2:
3342 3343 case TCPS_CLOSE_WAIT:
3343 3344 (void) tcp_clean_death(tcp, ECONNRESET);
3344 3345 break;
3345 3346 case TCPS_CLOSING:
3346 3347 case TCPS_LAST_ACK:
3347 3348 (void) tcp_clean_death(tcp, 0);
3348 3349 break;
3349 3350 default:
3350 3351 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
3351 3352 (void) tcp_clean_death(tcp, ENXIO);
3352 3353 break;
3353 3354 }
3354 3355 return;
3355 3356 }
3356 3357 if (flags & TH_SYN) {
3357 3358 /*
3358 3359 * See RFC 793, Page 71
3359 3360 *
3360 3361 * The seq number must be in the window as it should
3361 3362 			 * be "fixed" above. If it is outside the window, it should
3362 3363 			 * already have been rejected. Note that we allow seg_seq to be
3363 3364 			 * rnxt + rwnd because we want to accept a 0 window probe.
3364 3365 */
3365 3366 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) &&
3366 3367 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd));
3367 3368 freemsg(mp);
3368 3369 /*
3369 3370 * If the ACK flag is not set, just use our snxt as the
3370 3371 * seq number of the RST segment.
3371 3372 */
3372 3373 if (!(flags & TH_ACK)) {
3373 3374 seg_ack = tcp->tcp_snxt;
3374 3375 }
3375 3376 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1,
3376 3377 TH_RST|TH_ACK);
3377 3378 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
3378 3379 (void) tcp_clean_death(tcp, ECONNRESET);
3379 3380 return;
3380 3381 }
3381 3382 /*
3382 3383 * urp could be -1 when the urp field in the packet is 0
3383 3384 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent
3384 3385 * byte was at seg_seq - 1, in which case we ignore the urgent flag.
3385 3386 */
3386 3387 if (flags & TH_URG && urp >= 0) {
3387 3388 if (!tcp->tcp_urp_last_valid ||
3388 3389 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) {
3389 3390 /*
3390 3391 			 * Non-STREAMS sockets handle the urgent data a little
3391 3392 * differently from STREAMS based sockets. There is no
3392 3393 * need to mark any mblks with the MSG{NOT,}MARKNEXT
3393 3394 * flags to keep SIOCATMARK happy. Instead a
3394 3395 * su_signal_oob upcall is made to update the mark.
3395 3396 * Neither is a T_EXDATA_IND mblk needed to be
3396 3397 * prepended to the urgent data. The urgent data is
3397 3398 * delivered using the su_recv upcall, where we set
3398 3399 * the MSG_OOB flag to indicate that it is urg data.
3399 3400 *
3400 3401 * Neither TH_SEND_URP_MARK nor TH_MARKNEXT_NEEDED
3401 3402 * are used by non-STREAMS sockets.
3402 3403 */
3403 3404 if (IPCL_IS_NONSTR(connp)) {
3404 3405 if (!TCP_IS_DETACHED(tcp)) {
3405 3406 (*sockupcalls->su_signal_oob)
3406 3407 (connp->conn_upper_handle, urp);
3407 3408 }
3408 3409 } else {
3409 3410 /*
3410 3411 * If we haven't generated the signal yet for
3411 3412 * this urgent pointer value, do it now. Also,
3412 3413 * send up a zero-length M_DATA indicating
3413 3414 * whether or not this is the mark. The latter
3414 3415 * is not needed when a T_EXDATA_IND is sent up.
3415 3416 * However, if there are allocation failures
3416 3417 * this code relies on the sender retransmitting
3417 3418 * and the socket code for determining the mark
3418 3419 * should not block waiting for the peer to
3419 3420 * transmit. Thus, for simplicity we always
3420 3421 * send up the mark indication.
3421 3422 */
3422 3423 mp1 = allocb(0, BPRI_MED);
3423 3424 if (mp1 == NULL) {
3424 3425 freemsg(mp);
3425 3426 return;
3426 3427 }
3427 3428 if (!TCP_IS_DETACHED(tcp) &&
3428 3429 !putnextctl1(connp->conn_rq, M_PCSIG,
3429 3430 SIGURG)) {
3430 3431 /* Try again on the rexmit. */
3431 3432 freemsg(mp1);
3432 3433 freemsg(mp);
3433 3434 return;
3434 3435 }
3435 3436 /*
3436 3437 * Mark with NOTMARKNEXT for now.
3437 3438 * The code below will change this to MARKNEXT
3438 3439 * if we are at the mark.
3439 3440 *
3440 3441 * If there are allocation failures (e.g. in
3441 3442 * dupmsg below) the next time tcp_input_data
3442 3443 * sees the urgent segment it will send up the
3443 3444 * MSGMARKNEXT message.
3444 3445 */
3445 3446 mp1->b_flag |= MSGNOTMARKNEXT;
3446 3447 freemsg(tcp->tcp_urp_mark_mp);
3447 3448 tcp->tcp_urp_mark_mp = mp1;
3448 3449 flags |= TH_SEND_URP_MARK;
3449 3450 #ifdef DEBUG
3450 3451 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
3451 3452 "tcp_rput: sent M_PCSIG 2 seq %x urp %x "
3452 3453 "last %x, %s",
3453 3454 seg_seq, urp, tcp->tcp_urp_last,
3454 3455 tcp_display(tcp, NULL, DISP_PORT_ONLY));
3455 3456 #endif /* DEBUG */
3456 3457 }
3457 3458 tcp->tcp_urp_last_valid = B_TRUE;
3458 3459 tcp->tcp_urp_last = urp + seg_seq;
3459 3460 } else if (tcp->tcp_urp_mark_mp != NULL) {
3460 3461 /*
3461 3462 * An allocation failure prevented the previous
3462 3463 * tcp_input_data from sending up the allocated
3463 3464 * MSG*MARKNEXT message - send it up this time
3464 3465 * around.
3465 3466 */
3466 3467 flags |= TH_SEND_URP_MARK;
3467 3468 }
3468 3469
3469 3470 /*
3470 3471 * If the urgent byte is in this segment, make sure that it is
3471 3472 * all by itself. This makes it much easier to deal with the
3472 3473 * possibility of an allocation failure on the T_exdata_ind.
3473 3474 * Note that seg_len is the number of bytes in the segment, and
3474 3475 * urp is the offset into the segment of the urgent byte.
3475 3476 * urp < seg_len means that the urgent byte is in this segment.
3476 3477 */
3477 3478 if (urp < seg_len) {
3478 3479 if (seg_len != 1) {
3479 3480 uint32_t tmp_rnxt;
3480 3481 /*
3481 3482 * Break it up and feed it back in.
3482 3483 * Re-attach the IP header.
3483 3484 */
3484 3485 mp->b_rptr = iphdr;
3485 3486 if (urp > 0) {
3486 3487 /*
3487 3488 * There is stuff before the urgent
3488 3489 * byte.
3489 3490 */
3490 3491 mp1 = dupmsg(mp);
3491 3492 if (!mp1) {
3492 3493 /*
3493 3494 * Trim from urgent byte on.
3494 3495 * The rest will come back.
3495 3496 */
3496 3497 (void) adjmsg(mp,
3497 3498 urp - seg_len);
3498 3499 tcp_input_data(connp,
3499 3500 mp, NULL, ira);
3500 3501 return;
3501 3502 }
3502 3503 (void) adjmsg(mp1, urp - seg_len);
3503 3504 /* Feed this piece back in. */
3504 3505 tmp_rnxt = tcp->tcp_rnxt;
3505 3506 tcp_input_data(connp, mp1, NULL, ira);
3506 3507 /*
3507 3508 * If the data passed back in was not
3508 3509 * processed (ie: bad ACK) sending
3509 3510 * the remainder back in will cause a
3510 3511 * loop. In this case, drop the
3511 3512 * packet and let the sender try
3512 3513 * sending a good packet.
3513 3514 */
3514 3515 if (tmp_rnxt == tcp->tcp_rnxt) {
3515 3516 freemsg(mp);
3516 3517 return;
3517 3518 }
3518 3519 }
3519 3520 if (urp != seg_len - 1) {
3520 3521 uint32_t tmp_rnxt;
3521 3522 /*
3522 3523 * There is stuff after the urgent
3523 3524 * byte.
3524 3525 */
3525 3526 mp1 = dupmsg(mp);
3526 3527 if (!mp1) {
3527 3528 /*
3528 3529 * Trim everything beyond the
3529 3530 * urgent byte. The rest will
3530 3531 * come back.
3531 3532 */
3532 3533 (void) adjmsg(mp,
3533 3534 urp + 1 - seg_len);
3534 3535 tcp_input_data(connp,
3535 3536 mp, NULL, ira);
3536 3537 return;
3537 3538 }
3538 3539 (void) adjmsg(mp1, urp + 1 - seg_len);
3539 3540 tmp_rnxt = tcp->tcp_rnxt;
3540 3541 tcp_input_data(connp, mp1, NULL, ira);
3541 3542 /*
3542 3543 * If the data passed back in was not
3543 3544 * processed (ie: bad ACK) sending
3544 3545 * the remainder back in will cause a
3545 3546 * loop. In this case, drop the
3546 3547 * packet and let the sender try
3547 3548 * sending a good packet.
3548 3549 */
3549 3550 if (tmp_rnxt == tcp->tcp_rnxt) {
3550 3551 freemsg(mp);
3551 3552 return;
3552 3553 }
3553 3554 }
3554 3555 tcp_input_data(connp, mp, NULL, ira);
3555 3556 return;
3556 3557 }
3557 3558 /*
3558 3559 * This segment contains only the urgent byte. We
3559 3560 * have to allocate the T_exdata_ind, if we can.
3560 3561 */
3561 3562 if (IPCL_IS_NONSTR(connp)) {
3562 3563 int error;
3563 3564
3564 3565 (*sockupcalls->su_recv)
3565 3566 (connp->conn_upper_handle, mp, seg_len,
3566 3567 MSG_OOB, &error, NULL);
3567 3568 /*
3568 3569 * We should never be in middle of a
3569 3570 * fallback, the squeue guarantees that.
3570 3571 */
3571 3572 ASSERT(error != EOPNOTSUPP);
3572 3573 mp = NULL;
3573 3574 goto update_ack;
3574 3575 } else if (!tcp->tcp_urp_mp) {
3575 3576 struct T_exdata_ind *tei;
3576 3577 mp1 = allocb(sizeof (struct T_exdata_ind),
3577 3578 BPRI_MED);
3578 3579 if (!mp1) {
3579 3580 /*
3580 3581 * Sigh... It'll be back.
3581 3582 * Generate any MSG*MARK message now.
3582 3583 */
3583 3584 freemsg(mp);
3584 3585 seg_len = 0;
3585 3586 if (flags & TH_SEND_URP_MARK) {
3586 3587
3587 3588
3588 3589 ASSERT(tcp->tcp_urp_mark_mp);
3589 3590 tcp->tcp_urp_mark_mp->b_flag &=
3590 3591 ~MSGNOTMARKNEXT;
3591 3592 tcp->tcp_urp_mark_mp->b_flag |=
3592 3593 MSGMARKNEXT;
3593 3594 }
3594 3595 goto ack_check;
3595 3596 }
3596 3597 mp1->b_datap->db_type = M_PROTO;
3597 3598 tei = (struct T_exdata_ind *)mp1->b_rptr;
3598 3599 tei->PRIM_type = T_EXDATA_IND;
3599 3600 tei->MORE_flag = 0;
3600 3601 mp1->b_wptr = (uchar_t *)&tei[1];
3601 3602 tcp->tcp_urp_mp = mp1;
3602 3603 #ifdef DEBUG
3603 3604 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
3604 3605 "tcp_rput: allocated exdata_ind %s",
3605 3606 tcp_display(tcp, NULL,
3606 3607 DISP_PORT_ONLY));
3607 3608 #endif /* DEBUG */
3608 3609 /*
3609 3610 * There is no need to send a separate MSG*MARK
3610 3611 * message since the T_EXDATA_IND will be sent
3611 3612 * now.
3612 3613 */
3613 3614 flags &= ~TH_SEND_URP_MARK;
3614 3615 freemsg(tcp->tcp_urp_mark_mp);
3615 3616 tcp->tcp_urp_mark_mp = NULL;
3616 3617 }
3617 3618 /*
3618 3619 * Now we are all set. On the next putnext upstream,
3619 3620 * tcp_urp_mp will be non-NULL and will get prepended
3620 3621 * to what has to be this piece containing the urgent
3621 3622 * byte. If for any reason we abort this segment below,
3622 3623 * if it comes back, we will have this ready, or it
3623 3624 * will get blown off in close.
3624 3625 */
3625 3626 } else if (urp == seg_len) {
3626 3627 /*
3627 3628 * The urgent byte is the next byte after this sequence
3628 3629 * number. If this endpoint is non-STREAMS, then there
3629 3630 * is nothing to do here since the socket has already
3630 3631 * been notified about the urg pointer by the
3631 3632 * su_signal_oob call above.
3632 3633 *
3633 3634 * In case of STREAMS, some more work might be needed.
3634 3635 			 * If there is data, it is marked with MSGMARKNEXT
3635 3636 * and any tcp_urp_mark_mp is discarded since it is not
3636 3637 * needed. Otherwise, if the code above just allocated
3637 3638 * a zero-length tcp_urp_mark_mp message, that message
3638 3639 * is tagged with MSGMARKNEXT. Sending up these
3639 3640 * MSGMARKNEXT messages makes SIOCATMARK work correctly
3640 3641 * even though the T_EXDATA_IND will not be sent up
3641 3642 * until the urgent byte arrives.
3642 3643 */
3643 3644 if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
3644 3645 if (seg_len != 0) {
3645 3646 flags |= TH_MARKNEXT_NEEDED;
3646 3647 freemsg(tcp->tcp_urp_mark_mp);
3647 3648 tcp->tcp_urp_mark_mp = NULL;
3648 3649 flags &= ~TH_SEND_URP_MARK;
3649 3650 } else if (tcp->tcp_urp_mark_mp != NULL) {
3650 3651 flags |= TH_SEND_URP_MARK;
3651 3652 tcp->tcp_urp_mark_mp->b_flag &=
3652 3653 ~MSGNOTMARKNEXT;
3653 3654 tcp->tcp_urp_mark_mp->b_flag |=
3654 3655 MSGMARKNEXT;
3655 3656 }
3656 3657 }
3657 3658 #ifdef DEBUG
3658 3659 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
3659 3660 "tcp_rput: AT MARK, len %d, flags 0x%x, %s",
3660 3661 seg_len, flags,
3661 3662 tcp_display(tcp, NULL, DISP_PORT_ONLY));
3662 3663 #endif /* DEBUG */
3663 3664 }
3664 3665 #ifdef DEBUG
3665 3666 else {
3666 3667 /* Data left until we hit mark */
3667 3668 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
3668 3669 "tcp_rput: URP %d bytes left, %s",
3669 3670 urp - seg_len, tcp_display(tcp, NULL,
3670 3671 DISP_PORT_ONLY));
3671 3672 }
3672 3673 #endif /* DEBUG */
3673 3674 }
3674 3675
3675 3676 process_ack:
3676 3677 if (!(flags & TH_ACK)) {
3677 3678 freemsg(mp);
3678 3679 goto xmit_check;
3679 3680 }
3680 3681 }
3681 3682 bytes_acked = (int)(seg_ack - tcp->tcp_suna);
3682 3683
3683 3684 if (bytes_acked > 0)
3684 3685 tcp->tcp_ip_forward_progress = B_TRUE;
3685 3686 if (tcp->tcp_state == TCPS_SYN_RCVD) {
3686 3687 /*
3687 3688 * tcp_sendmsg() checks tcp_state without entering
3688 3689 * the squeue so tcp_state should be updated before
3689 3690 * sending up a connection confirmation or a new
3690 3691 * connection indication.
3691 3692 */
3692 3693 tcp->tcp_state = TCPS_ESTABLISHED;
3693 3694
3694 3695 /*
3695 3696 		 * We are seeing the final ack in the three-way
3696 3697 		 * handshake of an actively opened connection,
3697 3698 		 * so we must send up a T_CONN_CON.
3698 3699 */
3699 3700 if (tcp->tcp_active_open) {
3700 3701 if (!tcp_conn_con(tcp, iphdr, mp, NULL, ira)) {
3701 3702 freemsg(mp);
3702 3703 tcp->tcp_state = TCPS_SYN_RCVD;
3703 3704 return;
3704 3705 }
3705 3706 /*
3706 3707 * Don't fuse the loopback endpoints for
3707 3708 * simultaneous active opens.
3708 3709 */
3709 3710 if (tcp->tcp_loopback) {
3710 3711 TCP_STAT(tcps, tcp_fusion_unfusable);
3711 3712 tcp->tcp_unfusable = B_TRUE;
3712 3713 }
3713 3714 /*
3714 3715 * For simultaneous active open, trace receipt of final
3715 3716 * ACK as tcp:::connect-established.
3716 3717 */
3717 3718 DTRACE_TCP5(connect__established, mblk_t *, NULL,
3718 3719 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *,
3719 3720 iphdr, tcp_t *, tcp, tcph_t *, tcpha);
3720 3721 } else if (IPCL_IS_NONSTR(connp)) {
3721 3722 /*
3722 3723 * 3-way handshake has completed, so notify socket
3723 3724 * of the new connection.
3724 3725 *
3725 3726 			 * Being here means the eager is fine, but it
3726 3727 			 * can get a TH_RST at any point between now and
3727 3728 			 * when accept completes, and then disappear. We
3728 3729 			 * need to ensure that our reference to the eager
3729 3730 			 * remains valid after we leave the eager's
3730 3731 			 * perimeter, so we do an extra refhold.
3731 3732 */
3732 3733 CONN_INC_REF(connp);
3733 3734
3734 3735 if (!tcp_newconn_notify(tcp, ira)) {
3735 3736 /*
3736 3737 * The state-change probe for SYN_RCVD ->
3737 3738 * ESTABLISHED has not fired yet. We reset
3738 3739 * the state to SYN_RCVD so that future
3739 3740 * state-change probes report correct state
3740 3741 				 * transitions.
3741 3742 */
3742 3743 tcp->tcp_state = TCPS_SYN_RCVD;
3743 3744 freemsg(mp);
3744 3745 /* notification did not go up, so drop ref */
3745 3746 CONN_DEC_REF(connp);
3746 3747 /* ... and close the eager */
3747 3748 ASSERT(TCP_IS_DETACHED(tcp));
3748 3749 (void) tcp_close_detached(tcp);
3749 3750 return;
3750 3751 }
3751 3752 /*
3752 3753 * tcp_newconn_notify() changes conn_upcalls and
3753 3754 * connp->conn_upper_handle. Fix things now, in case
3754 3755 * there's data attached to this ack.
3755 3756 */
3756 3757 if (connp->conn_upcalls != NULL)
3757 3758 sockupcalls = connp->conn_upcalls;
3758 3759 /*
3759 3760 * For passive open, trace receipt of final ACK as
3760 3761 * tcp:::accept-established.
3761 3762 */
3762 3763 			DTRACE_TCP5(accept__established, mblk_t *, NULL,
3763 3764 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *,
3764 3765 iphdr, tcp_t *, tcp, tcph_t *, tcpha);
3765 3766 } else {
3766 3767 /*
3767 3768 * 3-way handshake complete - this is a STREAMS based
3768 3769 * socket, so pass up the T_CONN_IND.
3769 3770 */
3770 3771 tcp_t *listener = tcp->tcp_listener;
3771 3772 mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind;
3772 3773
3773 3774 tcp->tcp_tconnind_started = B_TRUE;
3774 3775 tcp->tcp_conn.tcp_eager_conn_ind = NULL;
3775 3776 ASSERT(mp != NULL);
3776 3777 /*
3777 3778 			 * Being here means the eager is fine, but it
3778 3779 			 * can get a TH_RST at any point between now and
3779 3780 			 * when accept completes, and then disappear. We
3780 3781 			 * need to ensure that our reference to the eager
3781 3782 			 * remains valid after we leave the eager's
3782 3783 			 * perimeter, so we do an extra refhold.
3783 3784 */
3784 3785 CONN_INC_REF(connp);
3785 3786
3786 3787 /*
3787 3788 * The listener also exists because of the refhold
3788 3789 			 * done in tcp_input_listener. It's possible that it
3789 3790 			 * might have closed. We will check that once we
3790 3791 			 * get inside the listener's context.
3791 3792 */
3792 3793 CONN_INC_REF(listener->tcp_connp);
3793 3794 if (listener->tcp_connp->conn_sqp ==
3794 3795 connp->conn_sqp) {
3795 3796 /*
3796 3797 * We optimize by not calling an SQUEUE_ENTER
3797 3798 * on the listener since we know that the
3798 3799 * listener and eager squeues are the same.
3799 3800 * We are able to make this check safely only
3800 3801 * because neither the eager nor the listener
3801 3802 * can change its squeue. Only an active connect
3802 3803 				 * can change its squeue.
3803 3804 */
3804 3805 tcp_send_conn_ind(listener->tcp_connp, mp,
3805 3806 listener->tcp_connp->conn_sqp);
3806 3807 CONN_DEC_REF(listener->tcp_connp);
3807 3808 } else if (!tcp->tcp_loopback) {
3808 3809 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
3809 3810 mp, tcp_send_conn_ind,
3810 3811 listener->tcp_connp, NULL, SQ_FILL,
3811 3812 SQTAG_TCP_CONN_IND);
3812 3813 } else {
3813 3814 SQUEUE_ENTER_ONE(listener->tcp_connp->conn_sqp,
3814 3815 mp, tcp_send_conn_ind,
3815 3816 listener->tcp_connp, NULL, SQ_NODRAIN,
3816 3817 SQTAG_TCP_CONN_IND);
3817 3818 }
3818 3819 /*
3819 3820 * For passive open, trace receipt of final ACK as
3820 3821 * tcp:::accept-established.
3821 3822 */
3822 3823 			DTRACE_TCP5(accept__established, mblk_t *, NULL,
3823 3824 ip_xmit_attr_t *, connp->conn_ixa, void_ip_t *,
3824 3825 iphdr, tcp_t *, tcp, tcph_t *, tcpha);
3825 3826 }
3826 3827 TCPS_CONN_INC(tcps);
3827 3828
3828 3829 tcp->tcp_suna = tcp->tcp_iss + 1; /* One for the SYN */
3829 3830 bytes_acked--;
3830 3831 /* SYN was acked - making progress */
3831 3832 tcp->tcp_ip_forward_progress = B_TRUE;
3832 3833
3833 3834 /*
3834 3835 * If SYN was retransmitted, need to reset all
3835 3836 * retransmission info as this segment will be
3836 3837 * treated as a dup ACK.
3837 3838 */
3838 3839 if (tcp->tcp_rexmit) {
3839 3840 tcp->tcp_rexmit = B_FALSE;
3840 3841 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
3841 3842 tcp->tcp_rexmit_max = tcp->tcp_snxt;
3842 3843 tcp->tcp_snd_burst = tcp->tcp_localnet ?
3843 3844 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
3844 3845 tcp->tcp_ms_we_have_waited = 0;
3845 3846 tcp->tcp_cwnd = mss;
3846 3847 }
3847 3848
3848 3849 /*
3849 3850 * We set the send window to zero here.
3850 3851 * This is needed if there is data to be
3851 3852 * processed already on the queue.
3852 3853 		 * Later (at the swnd_update label), when the
3853 3854 		 * "new_swnd > tcp_swnd" condition is satisfied,
3854 3855 * the XMIT_NEEDED flag is set in the current
3855 3856 * (SYN_RCVD) state. This ensures tcp_wput_data() is
3856 3857 * called if there is already data on queue in
3857 3858 * this state.
3858 3859 */
3859 3860 tcp->tcp_swnd = 0;
3860 3861
3861 3862 if (new_swnd > tcp->tcp_max_swnd)
3862 3863 tcp->tcp_max_swnd = new_swnd;
3863 3864 tcp->tcp_swl1 = seg_seq;
3864 3865 tcp->tcp_swl2 = seg_ack;
3865 3866 tcp->tcp_valid_bits &= ~TCP_ISS_VALID;
3866 3867
3867 3868 /* Trace change from SYN_RCVD -> ESTABLISHED here */
3868 3869 DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
3869 3870 connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
3870 3871 int32_t, TCPS_SYN_RCVD);
3871 3872
3872 3873 /* Fuse when both sides are in ESTABLISHED state */
3873 3874 if (tcp->tcp_loopback && do_tcp_fusion)
3874 3875 tcp_fuse(tcp, iphdr, tcpha);
3875 3876
3876 3877 }
3877 3878 /* This code follows 4.4BSD-Lite2 mostly. */
3878 3879 if (bytes_acked < 0)
3879 3880 goto est;
3880 3881
3881 3882 /*
3882 3883 * If TCP is ECN capable and the congestion experience bit is
3883 3884 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be
3884 3885 * done once per window (or more loosely, per RTT).
3885 3886 */
3886 3887 if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max))
3887 3888 tcp->tcp_cwr = B_FALSE;
3888 3889 if (tcp->tcp_ecn_ok && (flags & TH_ECE)) {
3889 3890 if (!tcp->tcp_cwr) {
3890 3891 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss;
3891 3892 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss;
3892 3893 tcp->tcp_cwnd = npkt * mss;
3893 3894 /*
3894 3895 * If the cwnd is 0, use the timer to clock out
3895 3896 * new segments. This is required by the ECN spec.
3896 3897 */
3897 3898 if (npkt == 0) {
3898 3899 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
3899 3900 /*
3900 3901 * This makes sure that when the ACK comes
3901 3902 * back, we will increase tcp_cwnd by 1 MSS.
3902 3903 */
3903 3904 tcp->tcp_cwnd_cnt = 0;
3904 3905 }
3905 3906 tcp->tcp_cwr = B_TRUE;
3906 3907 /*
3907 3908 * This marks the end of the current window of in
3908 3909 * flight data. That is why we don't use
3909 3910 * tcp_suna + tcp_swnd. Only data in flight can
3910 3911 * provide ECN info.
3911 3912 */
3912 3913 tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
3913 3914 tcp->tcp_ecn_cwr_sent = B_FALSE;
3914 3915 }
3915 3916 }
3916 3917
3917 3918 mp1 = tcp->tcp_xmit_head;
3918 3919 if (bytes_acked == 0) {
3919 3920 if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) {
3920 3921 int dupack_cnt;
3921 3922
3922 3923 TCPS_BUMP_MIB(tcps, tcpInDupAck);
3923 3924 /*
3924 3925 * Fast retransmit. When we have seen exactly three
3925 3926 * identical ACKs while we have unacked data
3926 3927 * outstanding we take it as a hint that our peer
3927 3928 * dropped something.
3928 3929 *
3929 3930 * If TCP is retransmitting, don't do fast retransmit.
3930 3931 */
3931 3932 if (mp1 && tcp->tcp_suna != tcp->tcp_snxt &&
3932 3933 ! tcp->tcp_rexmit) {
3933 3934 /* Do Limited Transmit */
3934 3935 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) <
3935 3936 tcps->tcps_dupack_fast_retransmit) {
3936 3937 /*
3937 3938 * RFC 3042
3938 3939 *
3939 3940 * What we need to do is temporarily
3940 3941 * increase tcp_cwnd so that new
3941 3942 * data can be sent if it is allowed
3942 3943 * by the receive window (tcp_rwnd).
3943 3944 * tcp_wput_data() will take care of
3944 3945 * the rest.
3945 3946 *
3946 3947 * If the connection is SACK capable,
3947 3948 * only do limited xmit when there
3948 3949 * is SACK info.
3949 3950 *
3950 3951 * Note how tcp_cwnd is incremented.
3951 3952 * The first dup ACK will increase
3952 3953 * it by 1 MSS. The second dup ACK
3953 3954 * will increase it by 2 MSS. This
3954 3955 * means that only 1 new segment will
3955 3956 * be sent for each dup ACK.
3956 3957 */
3957 3958 if (tcp->tcp_unsent > 0 &&
3958 3959 (!tcp->tcp_snd_sack_ok ||
3959 3960 (tcp->tcp_snd_sack_ok &&
3960 3961 tcp->tcp_notsack_list != NULL))) {
3961 3962 tcp->tcp_cwnd += mss <<
3962 3963 (tcp->tcp_dupack_cnt - 1);
3963 3964 flags |= TH_LIMIT_XMIT;
3964 3965 }
3965 3966 } else if (dupack_cnt ==
3966 3967 tcps->tcps_dupack_fast_retransmit) {
3967 3968
3968 3969 /*
3969 3970 * If we have reduced tcp_ssthresh
3970 3971 * because of ECN, do not reduce it again
3971 3972 * unless it is already one window of data
3972 3973 * away. After one window of data, tcp_cwr
3973 3974 * should then be cleared. Note that
3974 3975 * for non ECN capable connection, tcp_cwr
3975 3976 * should always be false.
3976 3977 *
3977 3978 * Adjust cwnd since the duplicate
3978 3979 * ack indicates that a packet was
3979 3980 * dropped (due to congestion.)
3980 3981 */
3981 3982 if (!tcp->tcp_cwr) {
3982 3983 npkt = ((tcp->tcp_snxt -
3983 3984 tcp->tcp_suna) >> 1) / mss;
3984 3985 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) *
3985 3986 mss;
3986 3987 tcp->tcp_cwnd = (npkt +
3987 3988 tcp->tcp_dupack_cnt) * mss;
3988 3989 }
3989 3990 if (tcp->tcp_ecn_ok) {
3990 3991 tcp->tcp_cwr = B_TRUE;
3991 3992 tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
3992 3993 tcp->tcp_ecn_cwr_sent = B_FALSE;
3993 3994 }
3994 3995
3995 3996 /*
3996 3997 * We do Hoe's algorithm. Refer to her
3997 3998 * paper "Improving the Start-up Behavior
3998 3999 * of a Congestion Control Scheme for TCP,"
3999 4000 				 * which appeared in SIGCOMM '96.
4000 4001 *
4001 4002 * Save highest seq no we have sent so far.
4002 4003 * Be careful about the invisible FIN byte.
4003 4004 */
4004 4005 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
4005 4006 (tcp->tcp_unsent == 0)) {
4006 4007 tcp->tcp_rexmit_max = tcp->tcp_fss;
4007 4008 } else {
4008 4009 tcp->tcp_rexmit_max = tcp->tcp_snxt;
4009 4010 }
4010 4011
4011 4012 /*
4012 4013 				 * Do not allow bursty traffic during
4013 4014 * fast recovery. Refer to Fall and Floyd's
4014 4015 * paper "Simulation-based Comparisons of
4015 4016 * Tahoe, Reno and SACK TCP" (in CCR?)
4016 4017 				 * This is a best current practice.
4017 4018 */
4018 4019 tcp->tcp_snd_burst = TCP_CWND_SS;
4019 4020
4020 4021 /*
4021 4022 * For SACK:
4022 4023 * Calculate tcp_pipe, which is the
4023 4024 * estimated number of bytes in
4024 4025 * network.
4025 4026 *
4026 4027 * tcp_fack is the highest sack'ed seq num
4027 4028 * TCP has received.
4028 4029 *
4029 4030 				 * tcp_pipe is explained in Fall and
4030 4031 				 * Floyd's paper quoted above. tcp_fack is
4031 4032 * explained in Mathis and Mahdavi's
4032 4033 * "Forward Acknowledgment: Refining TCP
4033 4034 * Congestion Control" in SIGCOMM '96.
4034 4035 */
4035 4036 if (tcp->tcp_snd_sack_ok) {
4036 4037 if (tcp->tcp_notsack_list != NULL) {
4037 4038 tcp->tcp_pipe = tcp->tcp_snxt -
4038 4039 tcp->tcp_fack;
4039 4040 tcp->tcp_sack_snxt = seg_ack;
4040 4041 flags |= TH_NEED_SACK_REXMIT;
4041 4042 } else {
4042 4043 /*
4043 4044 * Always initialize tcp_pipe
4044 4045 * even though we don't have
4045 4046 * any SACK info. If later
4046 4047 * we get SACK info and
4047 4048 * tcp_pipe is not initialized,
4048 4049 * funny things will happen.
4049 4050 */
4050 4051 tcp->tcp_pipe =
4051 4052 tcp->tcp_cwnd_ssthresh;
4052 4053 }
4053 4054 } else {
4054 4055 flags |= TH_REXMIT_NEEDED;
4055 4056 } /* tcp_snd_sack_ok */
4056 4057
4057 4058 } else {
4058 4059 /*
4059 4060 * Here we perform congestion
4060 4061 * avoidance, but NOT slow start.
4061 4062 * This is known as the Fast
4062 4063 * Recovery Algorithm.
4063 4064 */
4064 4065 if (tcp->tcp_snd_sack_ok &&
4065 4066 tcp->tcp_notsack_list != NULL) {
4066 4067 flags |= TH_NEED_SACK_REXMIT;
4067 4068 tcp->tcp_pipe -= mss;
4068 4069 if (tcp->tcp_pipe < 0)
4069 4070 tcp->tcp_pipe = 0;
4070 4071 } else {
4071 4072 /*
4072 4073 * We know that one more packet has
4073 4074 * left the pipe thus we can update
4074 4075 * cwnd.
4075 4076 */
4076 4077 cwnd = tcp->tcp_cwnd + mss;
4077 4078 if (cwnd > tcp->tcp_cwnd_max)
4078 4079 cwnd = tcp->tcp_cwnd_max;
4079 4080 tcp->tcp_cwnd = cwnd;
4080 4081 if (tcp->tcp_unsent > 0)
4081 4082 flags |= TH_XMIT_NEEDED;
4082 4083 }
4083 4084 }
4084 4085 }
4085 4086 } else if (tcp->tcp_zero_win_probe) {
4086 4087 /*
4087 4088 * If the window has opened, need to arrange
4088 4089 * to send additional data.
4089 4090 */
4090 4091 if (new_swnd != 0) {
4091 4092 /* tcp_suna != tcp_snxt */
4092 4093 /* Packet contains a window update */
4093 4094 TCPS_BUMP_MIB(tcps, tcpInWinUpdate);
4094 4095 tcp->tcp_zero_win_probe = 0;
4095 4096 tcp->tcp_timer_backoff = 0;
4096 4097 tcp->tcp_ms_we_have_waited = 0;
4097 4098
4098 4099 /*
4099 4100 * Transmit starting with tcp_suna since
4100 4101 * the one byte probe is not ack'ed.
4101 4102 * If TCP has sent more than one identical
4102 4103 * probe, tcp_rexmit will be set. That means
4103 4104 * tcp_ss_rexmit() will send out the one
4104 4105 * byte along with new data. Otherwise,
4105 4106 * fake the retransmission.
4106 4107 */
4107 4108 flags |= TH_XMIT_NEEDED;
4108 4109 if (!tcp->tcp_rexmit) {
4109 4110 tcp->tcp_rexmit = B_TRUE;
4110 4111 tcp->tcp_dupack_cnt = 0;
4111 4112 tcp->tcp_rexmit_nxt = tcp->tcp_suna;
4112 4113 tcp->tcp_rexmit_max = tcp->tcp_suna + 1;
4113 4114 }
4114 4115 }
4115 4116 }
4116 4117 goto swnd_update;
4117 4118 }
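
The bytes_acked == 0 branch above folds together Limited Transmit (RFC 3042), the three-dup-ACK fast retransmit trigger, and fast recovery inflation. A condensed sketch of just the counting and cwnd arithmetic, with all names local (FAST_RETRANSMIT stands in for tcps_dupack_fast_retransmit, normally 3; the SACK and unsent-data qualifiers are omitted):

	#include <stdint.h>

	#define	FAST_RETRANSMIT	3	/* stand-in for tcps_dupack_fast_retransmit */

	struct cc_state {
		uint32_t	cwnd, ssthresh, mss;
		uint32_t	snxt, suna;	/* send next / send unacked */
		int		dupack_cnt;
	};

	/* Returns 1 when the oldest unacked segment should be retransmitted. */
	static int
	on_dup_ack(struct cc_state *cc)
	{
		switch (++cc->dupack_cnt) {
		case 1:
		case 2:
			/* RFC 3042: inflate cwnd so one new segment can go. */
			cc->cwnd += cc->mss << (cc->dupack_cnt - 1);
			return (0);
		case FAST_RETRANSMIT: {
			/* Halve the flight estimate; keep at least 2 segments. */
			uint32_t npkt = ((cc->snxt - cc->suna) >> 1) / cc->mss;

			cc->ssthresh = (npkt < 2 ? 2 : npkt) * cc->mss;
			cc->cwnd = (npkt + cc->dupack_cnt) * cc->mss;
			return (1);	/* fast retransmit now */
		}
		default:
			/* Fast recovery: each extra dup ACK frees a segment. */
			cc->cwnd += cc->mss;
			return (0);
		}
	}
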
4118 4119
4119 4120 /*
4120 4121 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73.
4121 4122 * If the ACK value acks something that we have not yet sent, it might
4122 4123 * be an old duplicate segment. Send an ACK to re-synchronize the
4123 4124 * other side.
4124 4125 * Note: reset in response to unacceptable ACK in SYN_RECEIVE
4125 4126 * state is handled above, so we can always just drop the segment and
4126 4127 * send an ACK here.
4127 4128 *
4128 4129 * In the case where the peer shrinks the window, we see the new window
4129 4130 * update, but all the data sent previously is queued up by the peer.
4130 4131 * To account for this, in tcp_process_shrunk_swnd(), the sequence
4131 4132 * number, which was already sent, and within window, is recorded.
4132 4133 * tcp_snxt is then updated.
4133 4134 *
4134 4135 	 * If the window has previously shrunk and an ACK is received for
4135 4136 	 * data not yet sent according to tcp_snxt, it may still be valid. If
4136 4137 * the ACK is for data within the window at the time the window was
4137 4138 * shrunk, then the ACK is acceptable. In this case tcp_snxt is set to
4138 4139 * the sequence number ACK'ed.
4139 4140 *
4140 4141 * If the ACK covers all the data sent at the time the window was
4141 4142 * shrunk, we can now set tcp_is_wnd_shrnk to B_FALSE.
4142 4143 *
4143 4144 * Should we send ACKs in response to ACK only segments?
4144 4145 */
4145 4146
4146 4147 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) {
4147 4148 if ((tcp->tcp_is_wnd_shrnk) &&
4148 4149 (SEQ_LEQ(seg_ack, tcp->tcp_snxt_shrunk))) {
4149 4150 uint32_t data_acked_ahead_snxt;
4150 4151
4151 4152 data_acked_ahead_snxt = seg_ack - tcp->tcp_snxt;
4152 4153 tcp_update_xmit_tail(tcp, seg_ack);
4153 4154 tcp->tcp_unsent -= data_acked_ahead_snxt;
4154 4155 } else {
4155 4156 TCPS_BUMP_MIB(tcps, tcpInAckUnsent);
4156 4157 /* drop the received segment */
4157 4158 freemsg(mp);
4158 4159
4159 4160 /*
4160 4161 * Send back an ACK. If tcp_drop_ack_unsent_cnt is
4161 4162 * greater than 0, check if the number of such
4162 4163 			 * bogus ACKs is greater than that count. If yes,
4163 4164 * don't send back any ACK. This prevents TCP from
4164 4165 * getting into an ACK storm if somehow an attacker
4165 4166 * successfully spoofs an acceptable segment to our
4166 4167 * peer. If this continues (count > 2 X threshold),
4167 4168 * we should abort this connection.
4168 4169 */
4169 4170 if (tcp_drop_ack_unsent_cnt > 0 &&
4170 4171 ++tcp->tcp_in_ack_unsent >
4171 4172 tcp_drop_ack_unsent_cnt) {
4172 4173 TCP_STAT(tcps, tcp_in_ack_unsent_drop);
4173 4174 if (tcp->tcp_in_ack_unsent > 2 *
4174 4175 tcp_drop_ack_unsent_cnt) {
4175 4176 (void) tcp_clean_death(tcp, EPROTO);
4176 4177 }
4177 4178 return;
4178 4179 }
4179 4180 mp = tcp_ack_mp(tcp);
4180 4181 if (mp != NULL) {
4181 4182 BUMP_LOCAL(tcp->tcp_obsegs);
4182 4183 TCPS_BUMP_MIB(tcps, tcpOutAck);
4183 4184 tcp_send_data(tcp, mp);
4184 4185 }
4185 4186 return;
4186 4187 }
4187 4188 } else if (tcp->tcp_is_wnd_shrnk && SEQ_GEQ(seg_ack,
4188 4189 tcp->tcp_snxt_shrunk)) {
4189 4190 tcp->tcp_is_wnd_shrnk = B_FALSE;
4190 4191 }
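
A sketch of the bogus-ACK throttle in the branch above (the threshold name is a local stand-in for tcp_drop_ack_unsent_cnt and its value is illustrative): answer normally up to the threshold, go quiet beyond it to avoid an ACK storm, and give up on the connection once the count passes twice the threshold.

	#include <stdint.h>

	#define	DROP_ACK_UNSENT_CNT	10	/* illustrative threshold */

	enum unsent_ack_action { SEND_ACK, STAY_SILENT, ABORT_CONN };

	static enum unsent_ack_action
	on_ack_of_unsent(uint32_t *in_ack_unsent)
	{
		if (DROP_ACK_UNSENT_CNT == 0)
			return (SEND_ACK);	/* feature disabled */
		if (++(*in_ack_unsent) <= DROP_ACK_UNSENT_CNT)
			return (SEND_ACK);
		if (*in_ack_unsent > 2 * DROP_ACK_UNSENT_CNT)
			return (ABORT_CONN);
		return (STAY_SILENT);
	}
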
4191 4192
4192 4193 /*
4193 4194 * TCP gets a new ACK, update the notsack'ed list to delete those
4194 4195 * blocks that are covered by this ACK.
4195 4196 */
4196 4197 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
4197 4198 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack,
4198 4199 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list));
4199 4200 }
4200 4201
4201 4202 /*
4202 4203 * If we got an ACK after fast retransmit, check to see
4203 4204 * if it is a partial ACK. If it is not and the congestion
4204 4205 * window was inflated to account for the other side's
4205 4206 * cached packets, retract it. If it is, do Hoe's algorithm.
4206 4207 */
4207 4208 if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) {
4208 4209 ASSERT(tcp->tcp_rexmit == B_FALSE);
4209 4210 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) {
4210 4211 tcp->tcp_dupack_cnt = 0;
4211 4212 /*
4212 4213 * Restore the orig tcp_cwnd_ssthresh after
4213 4214 * fast retransmit phase.
4214 4215 */
4215 4216 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) {
4216 4217 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh;
4217 4218 }
4218 4219 tcp->tcp_rexmit_max = seg_ack;
4219 4220 tcp->tcp_cwnd_cnt = 0;
4220 4221 tcp->tcp_snd_burst = tcp->tcp_localnet ?
4221 4222 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
4222 4223
4223 4224 /*
4224 4225 * Remove all notsack info to avoid confusion with
4225 4226 		 * the next fast retransmit/recovery phase.
4226 4227 */
4227 4228 if (tcp->tcp_snd_sack_ok) {
4228 4229 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list,
4229 4230 tcp);
4230 4231 }
4231 4232 } else {
4232 4233 if (tcp->tcp_snd_sack_ok &&
4233 4234 tcp->tcp_notsack_list != NULL) {
4234 4235 flags |= TH_NEED_SACK_REXMIT;
4235 4236 tcp->tcp_pipe -= mss;
4236 4237 if (tcp->tcp_pipe < 0)
4237 4238 tcp->tcp_pipe = 0;
4238 4239 } else {
4239 4240 /*
4240 4241 * Hoe's algorithm:
4241 4242 *
4242 4243 * Retransmit the unack'ed segment and
4243 4244 * restart fast recovery. Note that we
4244 4245 * need to scale back tcp_cwnd to the
4245 4246 * original value when we started fast
4246 4247 * recovery. This is to prevent overly
4247 4248 * aggressive behaviour in sending new
4248 4249 * segments.
4249 4250 */
4250 4251 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh +
4251 4252 tcps->tcps_dupack_fast_retransmit * mss;
4252 4253 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd;
4253 4254 flags |= TH_REXMIT_NEEDED;
4254 4255 }
4255 4256 }
4256 4257 } else {
4257 4258 tcp->tcp_dupack_cnt = 0;
4258 4259 if (tcp->tcp_rexmit) {
4259 4260 /*
4260 4261 			 * TCP is retransmitting. If the ACK acks all
4261 4262 * outstanding data, update tcp_rexmit_max and
4262 4263 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt
4263 4264 * to the correct value.
4264 4265 *
4265 4266 * Note that SEQ_LEQ() is used. This is to avoid
4266 4267 * unnecessary fast retransmit caused by dup ACKs
4267 4268 * received when TCP does slow start retransmission
4268 4269 * after a time out. During this phase, TCP may
4269 4270 * send out segments which are already received.
4270 4271 * This causes dup ACKs to be sent back.
4271 4272 */
4272 4273 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) {
4273 4274 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) {
4274 4275 tcp->tcp_rexmit_nxt = seg_ack;
4275 4276 }
4276 4277 if (seg_ack != tcp->tcp_rexmit_max) {
4277 4278 flags |= TH_XMIT_NEEDED;
4278 4279 }
4279 4280 } else {
4280 4281 tcp->tcp_rexmit = B_FALSE;
4281 4282 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
4282 4283 tcp->tcp_snd_burst = tcp->tcp_localnet ?
4283 4284 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
4284 4285 }
4285 4286 tcp->tcp_ms_we_have_waited = 0;
4286 4287 }
4287 4288 }
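
A sketch of the recovery-exit logic above (Hoe's partial-ACK handling), with local names; DUPACK_THRESH mirrors tcps_dupack_fast_retransmit. A full ACK ends recovery and deflates cwnd back to ssthresh; a partial ACK retransmits the next hole and rewinds cwnd to where recovery began plus the original dup ACKs' worth of inflation.

	#include <stdint.h>

	#define	SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)
	#define	DUPACK_THRESH	3	/* stand-in for tcps_dupack_fast_retransmit */

	struct recovery_state {
		uint32_t	cwnd, ssthresh, mss;
		uint32_t	rexmit_max;	/* snxt when recovery started */
		int		dupack_cnt;
		int		rexmit_needed;	/* retransmit the next hole */
	};

	static void
	ack_in_recovery(struct recovery_state *rs, uint32_t seg_ack)
	{
		if (SEQ_GEQ(seg_ack, rs->rexmit_max)) {
			/* Full ACK: recovery is over; deflate the window. */
			rs->dupack_cnt = 0;
			if (rs->cwnd > rs->ssthresh)
				rs->cwnd = rs->ssthresh;
			rs->rexmit_max = seg_ack;
		} else {
			/* Partial ACK: Hoe's algorithm. */
			rs->cwnd = rs->ssthresh + DUPACK_THRESH * rs->mss;
			rs->rexmit_needed = 1;
		}
	}
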
4288 4289
4289 4290 TCPS_BUMP_MIB(tcps, tcpInAckSegs);
4290 4291 TCPS_UPDATE_MIB(tcps, tcpInAckBytes, bytes_acked);
4291 4292 tcp->tcp_suna = seg_ack;
4292 4293 if (tcp->tcp_zero_win_probe != 0) {
4293 4294 tcp->tcp_zero_win_probe = 0;
4294 4295 tcp->tcp_timer_backoff = 0;
4295 4296 }
4296 4297
4297 4298 /*
4298 4299 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed.
4299 4300 * Note that it cannot be the SYN being ack'ed. The code flow
4300 4301 * will not reach here.
4301 4302 */
4302 4303 if (mp1 == NULL) {
4303 4304 goto fin_acked;
4304 4305 }
4305 4306
4306 4307 /*
4307 4308 * Update the congestion window.
4308 4309 *
4309 4310 * If TCP is not ECN capable or TCP is ECN capable but the
4310 4311 * congestion experience bit is not set, increase the tcp_cwnd as
4311 4312 * usual.
4312 4313 */
4313 4314 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) {
4314 4315 cwnd = tcp->tcp_cwnd;
4315 4316 add = mss;
4316 4317
4317 4318 if (cwnd >= tcp->tcp_cwnd_ssthresh) {
4318 4319 /*
4319 4320 * This is to prevent an increase of less than 1 MSS of
4320 4321 * tcp_cwnd. With partial increase, tcp_wput_data()
4321 4322 * may send out tinygrams in order to preserve mblk
4322 4323 * boundaries.
4323 4324 *
4324 4325 * By initializing tcp_cwnd_cnt to new tcp_cwnd and
4325 4326 			 * decrementing it by 1 MSS for every ACK, tcp_cwnd is
4326 4327 			 * increased by 1 MSS for every RTT.
4327 4328 */
4328 4329 if (tcp->tcp_cwnd_cnt <= 0) {
4329 4330 tcp->tcp_cwnd_cnt = cwnd + add;
4330 4331 } else {
4331 4332 tcp->tcp_cwnd_cnt -= add;
4332 4333 add = 0;
4333 4334 }
4334 4335 }
4335 4336 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max);
4336 4337 }
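
A standalone sketch of the increase policy above: slow start adds one MSS per ACK; past ssthresh, cwnd_cnt is charged one MSS per ACK and cwnd only grows when the countdown runs out, which works out to roughly one MSS per RTT without ever adding a partial MSS. Names are local stand-ins for the tcp_t fields used above.

	#include <stdint.h>

	struct cwnd_state {
		uint32_t	cwnd, ssthresh, cwnd_max, mss;
		int32_t		cwnd_cnt;	/* countdown, may go negative */
	};

	static void
	cwnd_on_ack(struct cwnd_state *cw)
	{
		uint32_t add = cw->mss;

		if (cw->cwnd >= cw->ssthresh) {
			/* Congestion avoidance: pay down cwnd_cnt first. */
			if (cw->cwnd_cnt <= 0) {
				cw->cwnd_cnt = cw->cwnd + add;	/* recharge */
			} else {
				cw->cwnd_cnt -= add;
				add = 0;	/* no growth this ACK */
			}
		}
		cw->cwnd += add;
		if (cw->cwnd > cw->cwnd_max)
			cw->cwnd = cw->cwnd_max;
	}
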
4337 4338
4338 4339 /* See if the latest urgent data has been acknowledged */
4339 4340 if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
4340 4341 SEQ_GT(seg_ack, tcp->tcp_urg))
4341 4342 tcp->tcp_valid_bits &= ~TCP_URG_VALID;
4342 4343
4343 4344 /* Can we update the RTT estimates? */
4344 4345 if (tcp->tcp_snd_ts_ok) {
4345 4346 /* Ignore zero timestamp echo-reply. */
4346 4347 if (tcpopt.tcp_opt_ts_ecr != 0) {
4347 4348 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH -
4348 4349 (int32_t)tcpopt.tcp_opt_ts_ecr);
4349 4350 }
4350 4351
4351 4352 /* If needed, restart the timer. */
4352 4353 if (tcp->tcp_set_timer == 1) {
4353 4354 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
4354 4355 tcp->tcp_set_timer = 0;
4355 4356 }
4356 4357 /*
4357 4358 * Update tcp_csuna in case the other side stops sending
4358 4359 * us timestamps.
4359 4360 */
4360 4361 tcp->tcp_csuna = tcp->tcp_snxt;
4361 4362 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) {
4362 4363 /*
4363 4364 * An ACK sequence we haven't seen before, so get the RTT
4364 4365 * and update the RTO. But first check if the timestamp is
4365 4366 * valid to use.
4366 4367 */
4367 4368 if ((mp1->b_next != NULL) &&
4368 4369 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next)))
4369 4370 tcp_set_rto(tcp, (int32_t)LBOLT_FASTPATH -
4370 4371 (int32_t)(intptr_t)mp1->b_prev);
4371 4372 else
4372 4373 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate);
4373 4374
4374 4375 		/* Remember the last sequence to be ACKed */
4375 4376 tcp->tcp_csuna = seg_ack;
4376 4377 if (tcp->tcp_set_timer == 1) {
4377 4378 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
4378 4379 tcp->tcp_set_timer = 0;
4379 4380 }
4380 4381 } else {
4381 4382 TCPS_BUMP_MIB(tcps, tcpRttNoUpdate);
4382 4383 }
4383 4384
4384 4385 /* Eat acknowledged bytes off the xmit queue. */
4385 4386 for (;;) {
4386 4387 mblk_t *mp2;
4387 4388 uchar_t *wptr;
4388 4389
4389 4390 wptr = mp1->b_wptr;
4390 4391 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX);
4391 4392 bytes_acked -= (int)(wptr - mp1->b_rptr);
4392 4393 if (bytes_acked < 0) {
4393 4394 mp1->b_rptr = wptr + bytes_acked;
4394 4395 /*
4395 4396 * Set a new timestamp if all the bytes timed by the
4396 4397 * old timestamp have been ack'ed.
4397 4398 */
4398 4399 if (SEQ_GT(seg_ack,
4399 4400 (uint32_t)(uintptr_t)(mp1->b_next))) {
4400 4401 mp1->b_prev =
4401 4402 (mblk_t *)(uintptr_t)LBOLT_FASTPATH;
4402 4403 mp1->b_next = NULL;
4403 4404 }
4404 4405 break;
4405 4406 }
4406 4407 mp1->b_next = NULL;
4407 4408 mp1->b_prev = NULL;
4408 4409 mp2 = mp1;
4409 4410 mp1 = mp1->b_cont;
4410 4411
4411 4412 /*
4412 4413 * This notification is required for some zero-copy
4413 4414 * clients to maintain a copy semantic. After the data
4414 4415 * is ack'ed, client is safe to modify or reuse the buffer.
4415 4416 */
4416 4417 if (tcp->tcp_snd_zcopy_aware &&
4417 4418 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
4418 4419 tcp_zcopy_notify(tcp);
4419 4420 freeb(mp2);
4420 4421 if (bytes_acked == 0) {
4421 4422 if (mp1 == NULL) {
4422 4423 /* Everything is ack'ed, clear the tail. */
4423 4424 tcp->tcp_xmit_tail = NULL;
4424 4425 /*
4425 4426 * Cancel the timer unless we are still
4426 4427 * waiting for an ACK for the FIN packet.
4427 4428 */
4428 4429 if (tcp->tcp_timer_tid != 0 &&
4429 4430 tcp->tcp_snxt == tcp->tcp_suna) {
4430 4431 (void) TCP_TIMER_CANCEL(tcp,
4431 4432 tcp->tcp_timer_tid);
4432 4433 tcp->tcp_timer_tid = 0;
4433 4434 }
4434 4435 goto pre_swnd_update;
4435 4436 }
4436 4437 if (mp2 != tcp->tcp_xmit_tail)
4437 4438 break;
4438 4439 tcp->tcp_xmit_tail = mp1;
4439 4440 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
4440 4441 (uintptr_t)INT_MAX);
4441 4442 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr -
4442 4443 mp1->b_rptr);
4443 4444 break;
4444 4445 }
4445 4446 if (mp1 == NULL) {
4446 4447 /*
4447 4448 * More was acked but there is nothing more
4448 4449 * outstanding. This means that the FIN was
4449 4450 * just acked or that we're talking to a clown.
4450 4451 */
4451 4452 fin_acked:
4452 4453 ASSERT(tcp->tcp_fin_sent);
4453 4454 tcp->tcp_xmit_tail = NULL;
4454 4455 if (tcp->tcp_fin_sent) {
4455 4456 /* FIN was acked - making progress */
4456 4457 if (!tcp->tcp_fin_acked)
4457 4458 tcp->tcp_ip_forward_progress = B_TRUE;
4458 4459 tcp->tcp_fin_acked = B_TRUE;
4459 4460 if (tcp->tcp_linger_tid != 0 &&
4460 4461 TCP_TIMER_CANCEL(tcp,
4461 4462 tcp->tcp_linger_tid) >= 0) {
4462 4463 tcp_stop_lingering(tcp);
4463 4464 freemsg(mp);
4464 4465 mp = NULL;
4465 4466 }
4466 4467 } else {
4467 4468 /*
4468 4469 * We should never get here because
4469 4470 * we have already checked that the
4470 4471 * number of bytes ack'ed should be
4471 4472 * smaller than or equal to what we
4472 4473 * have sent so far (it is the
4473 4474 * acceptability check of the ACK).
4474 4475 * We can only get here if the send
4475 4476 * queue is corrupted.
4476 4477 *
4477 4478 * Terminate the connection and
4478 4479 * panic the system. It is better
4479 4480 * for us to panic instead of
4480 4481 * continuing to avoid other disaster.
4481 4482 */
4482 4483 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
4483 4484 tcp->tcp_rnxt, TH_RST|TH_ACK);
4484 4485 panic("Memory corruption "
4485 4486 "detected for connection %s.",
4486 4487 tcp_display(tcp, NULL,
4487 4488 DISP_ADDR_AND_PORT));
4488 4489 /*NOTREACHED*/
4489 4490 }
4490 4491 goto pre_swnd_update;
4491 4492 }
4492 4493 ASSERT(mp2 != tcp->tcp_xmit_tail);
4493 4494 }
4494 4495 if (tcp->tcp_unsent) {
4495 4496 flags |= TH_XMIT_NEEDED;
4496 4497 }
4497 4498 pre_swnd_update:
4498 4499 tcp->tcp_xmit_head = mp1;
4499 4500 swnd_update:
4500 4501 /*
4501 4502 * The following check is different from most other implementations.
4502 4503 * For bi-directional transfer, when segments are dropped, the
4503 4504 * "normal" check will not accept a window update in those
4504 4505  * retransmitted segments. Failing to do that, TCP may send out
4505 4506 * segments which are outside receiver's window. As TCP accepts
4506 4507 * the ack in those retransmitted segments, if the window update in
4507 4508  * the same segment is not accepted, TCP will incorrectly calculate
4508 4509 * that it can send more segments. This can create a deadlock
4509 4510 * with the receiver if its window becomes zero.
4510 4511 */
4511 4512 if (SEQ_LT(tcp->tcp_swl2, seg_ack) ||
4512 4513 SEQ_LT(tcp->tcp_swl1, seg_seq) ||
4513 4514 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) {
4514 4515 /*
4515 4516 * The criteria for update is:
4516 4517 *
4517 4518 * 1. the segment acknowledges some data. Or
4518 4519 * 2. the segment is new, i.e. it has a higher seq num. Or
4519 4520 * 3. the segment is not old and the advertised window is
4520 4521 * larger than the previous advertised window.
4521 4522 */
4522 4523 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd)
4523 4524 flags |= TH_XMIT_NEEDED;
4524 4525 tcp->tcp_swnd = new_swnd;
4525 4526 if (new_swnd > tcp->tcp_max_swnd)
4526 4527 tcp->tcp_max_swnd = new_swnd;
4527 4528 tcp->tcp_swl1 = seg_seq;
4528 4529 tcp->tcp_swl2 = seg_ack;
4529 4530 }
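
The same three-criteria test, pulled out as a predicate for readability (SEQ_LT as the usual signed-difference compare; swl1/swl2 are the seq/ack values of the last accepted window update):

	#include <stdint.h>

	#define	SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)

	/*
	 * Accept a window update if (1) the segment acks new data, or
	 * (2) it carries a newer sequence number, or (3) it is not old
	 * and advertises a larger window than we last accepted.
	 */
	static int
	swnd_update_ok(uint32_t swl1, uint32_t swl2, uint32_t seg_seq,
	    uint32_t seg_ack, uint32_t new_swnd, uint32_t old_swnd)
	{
		return (SEQ_LT(swl2, seg_ack) ||
		    SEQ_LT(swl1, seg_seq) ||
		    (swl1 == seg_seq && new_swnd > old_swnd));
	}
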
4530 4531 est:
4531 4532 if (tcp->tcp_state > TCPS_ESTABLISHED) {
4532 4533
4533 4534 switch (tcp->tcp_state) {
4534 4535 case TCPS_FIN_WAIT_1:
4535 4536 if (tcp->tcp_fin_acked) {
4536 4537 tcp->tcp_state = TCPS_FIN_WAIT_2;
4537 4538 DTRACE_TCP6(state__change, void, NULL,
4538 4539 ip_xmit_attr_t *, connp->conn_ixa,
4539 4540 void, NULL, tcp_t *, tcp, void, NULL,
4540 4541 int32_t, TCPS_FIN_WAIT_1);
4541 4542 /*
4542 4543 * We implement the non-standard BSD/SunOS
4543 4544 * FIN_WAIT_2 flushing algorithm.
4544 4545 * If there is no user attached to this
4545 4546 * TCP endpoint, then this TCP struct
4546 4547 * could hang around forever in FIN_WAIT_2
4547 4548 * state if the peer forgets to send us
4548 4549 * a FIN. To prevent this, we wait only
4549 4550 * 2*MSL (a convenient time value) for
4550 4551 * the FIN to arrive. If it doesn't show up,
4551 4552 * we flush the TCP endpoint. This algorithm,
4552 4553 * though a violation of RFC-793, has worked
4553 4554 * for over 10 years in BSD systems.
4554 4555 * Note: SunOS 4.x waits 675 seconds before
4555 4556 * flushing the FIN_WAIT_2 connection.
4556 4557 */
4557 4558 TCP_TIMER_RESTART(tcp,
4558 4559 tcp->tcp_fin_wait_2_flush_interval);
4559 4560 }
4560 4561 break;
4561 4562 case TCPS_FIN_WAIT_2:
4562 4563 break; /* Shutdown hook? */
4563 4564 case TCPS_LAST_ACK:
4564 4565 freemsg(mp);
4565 4566 if (tcp->tcp_fin_acked) {
4566 4567 (void) tcp_clean_death(tcp, 0);
4567 4568 return;
4568 4569 }
4569 4570 goto xmit_check;
4570 4571 case TCPS_CLOSING:
4571 4572 if (tcp->tcp_fin_acked) {
4572 4573 SET_TIME_WAIT(tcps, tcp, connp);
4573 4574 DTRACE_TCP6(state__change, void, NULL,
4574 4575 ip_xmit_attr_t *, connp->conn_ixa, void,
4575 4576 NULL, tcp_t *, tcp, void, NULL, int32_t,
4576 4577 TCPS_CLOSING);
4577 4578 }
4578 4579 /*FALLTHRU*/
4579 4580 case TCPS_CLOSE_WAIT:
4580 4581 freemsg(mp);
4581 4582 goto xmit_check;
4582 4583 default:
4583 4584 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT);
4584 4585 break;
4585 4586 }
4586 4587 }
4587 4588 if (flags & TH_FIN) {
4588 4589 /* Make sure we ack the fin */
4589 4590 flags |= TH_ACK_NEEDED;
4590 4591 if (!tcp->tcp_fin_rcvd) {
4591 4592 tcp->tcp_fin_rcvd = B_TRUE;
4592 4593 tcp->tcp_rnxt++;
4593 4594 tcpha = tcp->tcp_tcpha;
4594 4595 tcpha->tha_ack = htonl(tcp->tcp_rnxt);
4595 4596
4596 4597 /*
4597 4598 * Generate the ordrel_ind at the end unless the
4598 4599 * conn is detached or it is a STREAMS based eager.
4599 4600 * In the eager case we defer the notification until
4600 4601 * tcp_accept_finish has run.
4601 4602 */
4602 4603 if (!TCP_IS_DETACHED(tcp) && (IPCL_IS_NONSTR(connp) ||
4603 4604 (tcp->tcp_listener == NULL &&
4604 4605 !tcp->tcp_hard_binding)))
4605 4606 flags |= TH_ORDREL_NEEDED;
4606 4607 switch (tcp->tcp_state) {
4607 4608 case TCPS_SYN_RCVD:
4608 4609 tcp->tcp_state = TCPS_CLOSE_WAIT;
4609 4610 DTRACE_TCP6(state__change, void, NULL,
4610 4611 ip_xmit_attr_t *, connp->conn_ixa,
4611 4612 void, NULL, tcp_t *, tcp, void, NULL,
4612 4613 int32_t, TCPS_SYN_RCVD);
4613 4614 /* Keepalive? */
4614 4615 break;
4615 4616 case TCPS_ESTABLISHED:
4616 4617 tcp->tcp_state = TCPS_CLOSE_WAIT;
4617 4618 DTRACE_TCP6(state__change, void, NULL,
4618 4619 ip_xmit_attr_t *, connp->conn_ixa,
4619 4620 void, NULL, tcp_t *, tcp, void, NULL,
4620 4621 int32_t, TCPS_ESTABLISHED);
4621 4622 /* Keepalive? */
4622 4623 break;
4623 4624 case TCPS_FIN_WAIT_1:
4624 4625 if (!tcp->tcp_fin_acked) {
4625 4626 tcp->tcp_state = TCPS_CLOSING;
4626 4627 DTRACE_TCP6(state__change, void, NULL,
4627 4628 ip_xmit_attr_t *, connp->conn_ixa,
4628 4629 void, NULL, tcp_t *, tcp, void,
4629 4630 NULL, int32_t, TCPS_FIN_WAIT_1);
4630 4631 break;
4631 4632 }
4632 4633 /* FALLTHRU */
4633 4634 case TCPS_FIN_WAIT_2:
4634 4635 SET_TIME_WAIT(tcps, tcp, connp);
4635 4636 DTRACE_TCP6(state__change, void, NULL,
4636 4637 ip_xmit_attr_t *, connp->conn_ixa, void,
4637 4638 NULL, tcp_t *, tcp, void, NULL, int32_t,
4638 4639 TCPS_FIN_WAIT_2);
4639 4640 if (seg_len) {
4640 4641 /*
4641 4642 * implies data piggybacked on FIN.
4642 4643 * break to handle data.
4643 4644 */
4644 4645 break;
4645 4646 }
4646 4647 freemsg(mp);
4647 4648 goto ack_check;
4648 4649 }
4649 4650 }
4650 4651 }
4651 4652 if (mp == NULL)
4652 4653 goto xmit_check;
4653 4654 if (seg_len == 0) {
4654 4655 freemsg(mp);
4655 4656 goto xmit_check;
4656 4657 }
4657 4658 if (mp->b_rptr == mp->b_wptr) {
4658 4659 /*
4659 4660 * The header has been consumed, so we remove the
4660 4661 * zero-length mblk here.
4661 4662 */
4662 4663 mp1 = mp;
4663 4664 mp = mp->b_cont;
4664 4665 freeb(mp1);
4665 4666 }
4666 4667 update_ack:
4667 4668 tcpha = tcp->tcp_tcpha;
4668 4669 tcp->tcp_rack_cnt++;
4669 4670 {
4670 4671 uint32_t cur_max;
4671 4672
4672 4673 cur_max = tcp->tcp_rack_cur_max;
4673 4674 if (tcp->tcp_rack_cnt >= cur_max) {
4674 4675 /*
4675 4676 * We have more unacked data than we should - send
4676 4677 * an ACK now.
4677 4678 */
4678 4679 flags |= TH_ACK_NEEDED;
4679 4680 cur_max++;
4680 4681 if (cur_max > tcp->tcp_rack_abs_max)
4681 4682 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
4682 4683 else
4683 4684 tcp->tcp_rack_cur_max = cur_max;
4684 4685 } else if (TCP_IS_DETACHED(tcp)) {
4685 4686 /* We don't have an ACK timer for detached TCP. */
4686 4687 flags |= TH_ACK_NEEDED;
4687 4688 } else if (seg_len < mss) {
4688 4689 /*
4689 4690 * If we get a segment that is less than an mss, and we
4690 4691 * already have unacknowledged data, and the amount
4691 4692 * unacknowledged is not a multiple of mss, then we
4692 4693 * had better generate an ACK now. Otherwise, this may be
4693 4694 * the tail piece of a transaction, and we would rather
4694 4695 * wait for the response.
4695 4696 */
4696 4697 uint32_t udif;
4697 4698 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <=
4698 4699 (uintptr_t)INT_MAX);
4699 4700 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack);
4700 4701 if (udif && (udif % mss))
4701 4702 flags |= TH_ACK_NEEDED;
4702 4703 else
4703 4704 flags |= TH_ACK_TIMER_NEEDED;
4704 4705 } else {
4705 4706 /* Start delayed ack timer */
4706 4707 flags |= TH_ACK_TIMER_NEEDED;
4707 4708 }
4708 4709 }
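
The block above chooses between ACKing immediately and arming the delayed-ACK timer (the detached-TCP case, which always ACKs at once, is handled separately). A standalone sketch of that decision; ack_now() and all values are hypothetical, not part of this file:

#include <stdio.h>
#include <stdint.h>

/*
 * ACK immediately once the count of unACKed segments reaches the
 * current max, or when a sub-MSS segment arrives and the unACKed
 * byte count is not a multiple of the MSS; otherwise defer to the
 * delayed-ACK timer.
 */
static int
ack_now(uint32_t rack_cnt, uint32_t rack_cur_max, uint32_t seg_len,
    uint32_t unacked, uint32_t mss)
{
	if (rack_cnt >= rack_cur_max)
		return (1);
	if (seg_len < mss && unacked != 0 && (unacked % mss) != 0)
		return (1);
	return (0);		/* defer: arm the delayed-ACK timer */
}

int
main(void)
{
	/* 700-byte tail segment, 2900 bytes unACKed, MSS 1460: ACK now. */
	printf("%d\n", ack_now(1, 8, 700, 2900, 1460));		/* 1 */
	/* Full-sized segment, unACKed amount a multiple of MSS: defer. */
	printf("%d\n", ack_now(1, 8, 1460, 2920, 1460));	/* 0 */
	return (0);
}
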
4709 4710 tcp->tcp_rnxt += seg_len;
4710 4711 tcpha->tha_ack = htonl(tcp->tcp_rnxt);
4711 4712
4712 4713 if (mp == NULL)
4713 4714 goto xmit_check;
4714 4715
4715 4716 /* Update SACK list */
4716 4717 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
4717 4718 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt,
4718 4719 &(tcp->tcp_num_sack_blk));
4719 4720 }
4720 4721
4721 4722 if (tcp->tcp_urp_mp) {
4722 4723 tcp->tcp_urp_mp->b_cont = mp;
4723 4724 mp = tcp->tcp_urp_mp;
4724 4725 tcp->tcp_urp_mp = NULL;
4725 4726 /* Ready for a new signal. */
4726 4727 tcp->tcp_urp_last_valid = B_FALSE;
4727 4728 #ifdef DEBUG
4728 4729 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
4729 4730 "tcp_rput: sending exdata_ind %s",
4730 4731 tcp_display(tcp, NULL, DISP_PORT_ONLY));
4731 4732 #endif /* DEBUG */
4732 4733 }
4733 4734
4734 4735 /*
4735 4736 * Check for ancillary data changes compared to the last segment.
4736 4737 */
4737 4738 if (connp->conn_recv_ancillary.crb_all != 0) {
4738 4739 mp = tcp_input_add_ancillary(tcp, mp, &ipp, ira);
4739 4740 if (mp == NULL)
4740 4741 return;
4741 4742 }
4742 4743
4743 4744 if (IPCL_IS_NONSTR(connp)) {
4744 4745 /*
4745 4746 * Non-STREAMS socket
4746 4747 */
4747 4748 boolean_t push = flags & (TH_PUSH|TH_FIN);
4748 4749 int error;
4749 4750
4750 4751 if ((*sockupcalls->su_recv)(connp->conn_upper_handle,
4751 4752 mp, seg_len, 0, &error, &push) <= 0) {
4752 4753 /*
4753 4754 * We should never be in the middle of a
4754 4755 * fallback; the squeue guarantees that.
4755 4756 */
4756 4757 ASSERT(error != EOPNOTSUPP);
4757 4758 if (error == ENOSPC)
4758 4759 tcp->tcp_rwnd -= seg_len;
4759 4760 } else if (push) {
4760 4761 /* PUSH bit set and sockfs is not flow controlled */
4761 4762 flags |= tcp_rwnd_reopen(tcp);
4762 4763 }
4763 4764 } else if (tcp->tcp_listener != NULL || tcp->tcp_hard_binding) {
4764 4765 /*
4765 4766 * Side-queue inbound data until the accept happens;
4766 4767 * tcp_accept/tcp_rput drain it at that point.
4767 4768 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or
4768 4769 * T_EXDATA_IND) it is queued on b_next.
4769 4770 * XXX Make urgent data use this. Requires:
4770 4771 * Removing tcp_listener check for TH_URG
4771 4772 * Making M_PCPROTO and MARK messages skip the eager case
4772 4773 */
4773 4774
4774 4775 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred);
4775 4776 } else {
4776 4777 /* Active STREAMS socket */
4777 4778 if (mp->b_datap->db_type != M_DATA ||
4778 4779 (flags & TH_MARKNEXT_NEEDED)) {
4779 4780 if (tcp->tcp_rcv_list != NULL) {
4780 4781 flags |= tcp_rcv_drain(tcp);
4781 4782 }
4782 4783 ASSERT(tcp->tcp_rcv_list == NULL ||
4783 4784 tcp->tcp_fused_sigurg);
4784 4785
4785 4786 if (flags & TH_MARKNEXT_NEEDED) {
4786 4787 #ifdef DEBUG
4787 4788 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
4788 4789 "tcp_rput: sending MSGMARKNEXT %s",
4789 4790 tcp_display(tcp, NULL,
4790 4791 DISP_PORT_ONLY));
4791 4792 #endif /* DEBUG */
4792 4793 mp->b_flag |= MSGMARKNEXT;
4793 4794 flags &= ~TH_MARKNEXT_NEEDED;
4794 4795 }
4795 4796
4796 4797 if (is_system_labeled())
4797 4798 tcp_setcred_data(mp, ira);
4798 4799
4799 4800 putnext(connp->conn_rq, mp);
4800 4801 if (!canputnext(connp->conn_rq))
4801 4802 tcp->tcp_rwnd -= seg_len;
4802 4803 } else if ((flags & (TH_PUSH|TH_FIN)) ||
4803 4804 tcp->tcp_rcv_cnt + seg_len >= connp->conn_rcvbuf >> 3) {
4804 4805 if (tcp->tcp_rcv_list != NULL) {
4805 4806 /*
4806 4807 * Enqueue the new segment first and then
4807 4808 * call tcp_rcv_drain() to send all queued
4808 4809 * data up. The alternative is to send the
4809 4810 * queued data up first and then putnext()
4810 4811 * the new segment, which would let us drop
4811 4812 * the else branch below.
4812 4813 *
4813 4814 * We don't do that because it would cost
4814 4815 * one more canputnext() call on top of the
4815 4816 * one that tcp_rcv_drain() already has to
4816 4817 * make.
4817 4818 */
4818 4819 tcp_rcv_enqueue(tcp, mp, seg_len,
4819 4820 ira->ira_cred);
4820 4821 flags |= tcp_rcv_drain(tcp);
4821 4822 } else {
4822 4823 if (is_system_labeled())
4823 4824 tcp_setcred_data(mp, ira);
4824 4825
4825 4826 putnext(connp->conn_rq, mp);
4826 4827 if (!canputnext(connp->conn_rq))
4827 4828 tcp->tcp_rwnd -= seg_len;
4828 4829 }
4829 4830 } else {
4830 4831 /*
4831 4832 * Enqueue all packets when processing an mblk
4832 4833 * from the co queue and also enqueue normal packets.
4833 4834 */
4834 4835 tcp_rcv_enqueue(tcp, mp, seg_len, ira->ira_cred);
4835 4836 }
4836 4837 /*
4837 4838 * Make sure the timer is running if we have data waiting
4838 4839 * for a push bit. This provides resiliency against
4839 4840 * implementations that do not correctly generate push bits.
4840 4841 */
4841 4842 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) {
4842 4843 /*
4843 4844 * The connection may be closed at this point, so don't
4844 4845 * do anything for a detached tcp.
4845 4846 */
4846 4847 if (!TCP_IS_DETACHED(tcp))
4847 4848 tcp->tcp_push_tid = TCP_TIMER(tcp,
4848 4849 tcp_push_timer,
4849 4850 tcps->tcps_push_timer_interval);
4850 4851 }
4851 4852 }
4852 4853
4853 4854 xmit_check:
4854 4855 /* Is there anything left to do? */
4855 4856 ASSERT(!(flags & TH_MARKNEXT_NEEDED));
4856 4857 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED|
4857 4858 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED|
4858 4859 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0)
4859 4860 goto done;
4860 4861
4861 4862 /* Any transmit work to do and a non-zero window? */
4862 4863 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT|
4863 4864 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) {
4864 4865 if (flags & TH_REXMIT_NEEDED) {
4865 4866 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna;
4866 4867
4867 4868 TCPS_BUMP_MIB(tcps, tcpOutFastRetrans);
4868 4869 if (snd_size > mss)
4869 4870 snd_size = mss;
4870 4871 if (snd_size > tcp->tcp_swnd)
4871 4872 snd_size = tcp->tcp_swnd;
4872 4873 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size,
4873 4874 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size,
4874 4875 B_TRUE);
4875 4876
4876 4877 if (mp1 != NULL) {
4877 4878 tcp->tcp_xmit_head->b_prev =
4878 4879 (mblk_t *)LBOLT_FASTPATH;
4879 4880 tcp->tcp_csuna = tcp->tcp_snxt;
4880 4881 TCPS_BUMP_MIB(tcps, tcpRetransSegs);
4881 4882 TCPS_UPDATE_MIB(tcps, tcpRetransBytes,
4882 4883 snd_size);
4883 4884 tcp_send_data(tcp, mp1);
4884 4885 }
4885 4886 }
4886 4887 if (flags & TH_NEED_SACK_REXMIT) {
4887 4888 tcp_sack_rexmit(tcp, &flags);
4888 4889 }
4889 4890 /*
4890 4891 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send
4891 4892 * out a new segment. Note that tcp_rexmit should not be
4892 4893 * set here; if it were, TH_LIMIT_XMIT would not be set.
4893 4894 */
4894 4895 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) {
4895 4896 if (!tcp->tcp_rexmit) {
4896 4897 tcp_wput_data(tcp, NULL, B_FALSE);
4897 4898 } else {
4898 4899 tcp_ss_rexmit(tcp);
4899 4900 }
4900 4901 }
4901 4902 /*
4902 4903 * Adjust tcp_cwnd back to normal value after sending
4903 4904 * new data segments.
4904 4905 */
4905 4906 if (flags & TH_LIMIT_XMIT) {
4906 4907 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1);
4907 4908 /*
4908 4909 * This will restart the timer. Restarting the
4909 4910 * timer avoids a timeout before the ACK for the
4910 4911 * limited-transmit segment gets back.
4911 4912 */
4912 4913 if (tcp->tcp_xmit_head != NULL)
4913 4914 tcp->tcp_xmit_head->b_prev =
4914 4915 (mblk_t *)LBOLT_FASTPATH;
4915 4916 }
4916 4917
4917 4918 /* Anything more to do? */
4918 4919 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED|
4919 4920 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0)
4920 4921 goto done;
4921 4922 }
4922 4923 ack_check:
4923 4924 if (flags & TH_SEND_URP_MARK) {
4924 4925 ASSERT(tcp->tcp_urp_mark_mp);
4925 4926 ASSERT(!IPCL_IS_NONSTR(connp));
4926 4927 /*
4927 4928 * Send up any queued data and then send the mark message
4928 4929 */
4929 4930 if (tcp->tcp_rcv_list != NULL) {
4930 4931 flags |= tcp_rcv_drain(tcp);
4931 4932
4932 4933 }
4933 4934 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
4934 4935 mp1 = tcp->tcp_urp_mark_mp;
4935 4936 tcp->tcp_urp_mark_mp = NULL;
4936 4937 if (is_system_labeled())
4937 4938 tcp_setcred_data(mp1, ira);
4938 4939
4939 4940 putnext(connp->conn_rq, mp1);
4940 4941 #ifdef DEBUG
4941 4942 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
4942 4943 "tcp_rput: sending zero-length %s %s",
4943 4944 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" :
4944 4945 "MSGNOTMARKNEXT"),
4945 4946 tcp_display(tcp, NULL, DISP_PORT_ONLY));
4946 4947 #endif /* DEBUG */
4947 4948 flags &= ~TH_SEND_URP_MARK;
4948 4949 }
4949 4950 if (flags & TH_ACK_NEEDED) {
4950 4951 /*
4951 4952 * Time to send an ACK for some reason.
4952 4953 */
4953 4954 mp1 = tcp_ack_mp(tcp);
4954 4955
4955 4956 if (mp1 != NULL) {
4956 4957 tcp_send_data(tcp, mp1);
4957 4958 BUMP_LOCAL(tcp->tcp_obsegs);
4958 4959 TCPS_BUMP_MIB(tcps, tcpOutAck);
4959 4960 }
4960 4961 if (tcp->tcp_ack_tid != 0) {
4961 4962 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid);
4962 4963 tcp->tcp_ack_tid = 0;
4963 4964 }
4964 4965 }
4965 4966 if (flags & TH_ACK_TIMER_NEEDED) {
4966 4967 /*
4967 4968 * Arrange for a deferred-ACK or push-wait timeout.
4968 4969 * Start the timer if it is not already running.
4969 4970 */
4970 4971 if (tcp->tcp_ack_tid == 0) {
4971 4972 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer,
4972 4973 tcp->tcp_localnet ?
4973 4974 tcps->tcps_local_dack_interval :
4974 4975 tcps->tcps_deferred_ack_interval);
4975 4976 }
4976 4977 }
4977 4978 if (flags & TH_ORDREL_NEEDED) {
4978 4979 /*
4979 4980 * Notify upper layer about an orderly release. If this is
4980 4981 * a non-STREAMS socket, then just make an upcall. For STREAMS
4981 4982 * we send up an ordrel_ind, unless this is an eager, in which
4982 4983 * case the ordrel will be sent when tcp_accept_finish runs.
4983 4984 * Note that for non-STREAMS we make an upcall even if it is an
4984 4985 * eager, because we have an upper handle to send it to.
4985 4986 */
4986 4987 ASSERT(IPCL_IS_NONSTR(connp) || tcp->tcp_listener == NULL);
4987 4988 ASSERT(!tcp->tcp_detached);
4988 4989
4989 4990 if (IPCL_IS_NONSTR(connp)) {
4990 4991 ASSERT(tcp->tcp_ordrel_mp == NULL);
4991 4992 tcp->tcp_ordrel_done = B_TRUE;
4992 4993 (*sockupcalls->su_opctl)(connp->conn_upper_handle,
4993 4994 SOCK_OPCTL_SHUT_RECV, 0);
4994 4995 goto done;
4995 4996 }
4996 4997
4997 4998 if (tcp->tcp_rcv_list != NULL) {
4998 4999 /*
4999 5000 * Push any mblk(s) enqueued from co processing.
5000 5001 */
5001 5002 flags |= tcp_rcv_drain(tcp);
5002 5003 }
5003 5004 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
5004 5005
5005 5006 mp1 = tcp->tcp_ordrel_mp;
5006 5007 tcp->tcp_ordrel_mp = NULL;
5007 5008 tcp->tcp_ordrel_done = B_TRUE;
5008 5009 putnext(connp->conn_rq, mp1);
5009 5010 }
5010 5011 done:
5011 5012 ASSERT(!(flags & TH_MARKNEXT_NEEDED));
5012 5013 }
5013 5014
5014 5015 /*
5015 5016 * Attach ancillary data to a received TCP segment for those
5016 5017 * ancillary pieces requested by the application that differ
5017 5018 * from what was attached to the previous data segment.
5018 5019 *
5019 5020 * Save the "current" values only once memory allocation succeeds, so
5020 5021 * that when an allocation fails we can just wait for the next segment.
5021 5022 */
5022 5023 static mblk_t *
5023 5024 tcp_input_add_ancillary(tcp_t *tcp, mblk_t *mp, ip_pkt_t *ipp,
5024 5025 ip_recv_attr_t *ira)
5025 5026 {
5026 5027 struct T_optdata_ind *todi;
5027 5028 int optlen;
5028 5029 uchar_t *optptr;
5029 5030 struct T_opthdr *toh;
5030 5031 crb_t addflag; /* Which pieces to add */
5031 5032 mblk_t *mp1;
5032 5033 conn_t *connp = tcp->tcp_connp;
5033 5034
5034 5035 optlen = 0;
5035 5036 addflag.crb_all = 0;
5036 5037 /* If app asked for pktinfo and the index has changed ... */
5037 5038 if (connp->conn_recv_ancillary.crb_ip_recvpktinfo &&
5038 5039 ira->ira_ruifindex != tcp->tcp_recvifindex) {
5039 5040 optlen += sizeof (struct T_opthdr) +
5040 5041 sizeof (struct in6_pktinfo);
5041 5042 addflag.crb_ip_recvpktinfo = 1;
5042 5043 }
5043 5044 /* If app asked for hoplimit and it has changed ... */
5044 5045 if (connp->conn_recv_ancillary.crb_ipv6_recvhoplimit &&
5045 5046 ipp->ipp_hoplimit != tcp->tcp_recvhops) {
5046 5047 optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
5047 5048 addflag.crb_ipv6_recvhoplimit = 1;
5048 5049 }
5049 5050 /* If app asked for tclass and it has changed ... */
5050 5051 if (connp->conn_recv_ancillary.crb_ipv6_recvtclass &&
5051 5052 ipp->ipp_tclass != tcp->tcp_recvtclass) {
5052 5053 optlen += sizeof (struct T_opthdr) + sizeof (uint_t);
5053 5054 addflag.crb_ipv6_recvtclass = 1;
5054 5055 }
5055 5056 /*
5056 5057 * If app asked for hopbyhop headers and it has changed ...
5057 5058 * For security labels, note that (1) security labels can't change on
5058 5059 * a connected socket at all, (2) we're connected to at most one peer,
5059 5060 * (3) if anything changes, then it must be some other extra option.
5060 5061 */
5061 5062 if (connp->conn_recv_ancillary.crb_ipv6_recvhopopts &&
5062 5063 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen,
5063 5064 (ipp->ipp_fields & IPPF_HOPOPTS),
5064 5065 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) {
5065 5066 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen;
5066 5067 addflag.crb_ipv6_recvhopopts = 1;
5067 5068 if (!ip_allocbuf((void **)&tcp->tcp_hopopts,
5068 5069 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS),
5069 5070 ipp->ipp_hopopts, ipp->ipp_hopoptslen))
5070 5071 return (mp);
5071 5072 }
5072 5073 /* If app asked for dst headers before routing headers ... */
5073 5074 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdrdstopts &&
5074 5075 ip_cmpbuf(tcp->tcp_rthdrdstopts, tcp->tcp_rthdrdstoptslen,
5075 5076 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
5076 5077 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen)) {
5077 5078 optlen += sizeof (struct T_opthdr) +
5078 5079 ipp->ipp_rthdrdstoptslen;
5079 5080 addflag.crb_ipv6_recvrthdrdstopts = 1;
5080 5081 if (!ip_allocbuf((void **)&tcp->tcp_rthdrdstopts,
5081 5082 &tcp->tcp_rthdrdstoptslen,
5082 5083 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
5083 5084 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen))
5084 5085 return (mp);
5085 5086 }
5086 5087 /* If app asked for routing headers and it has changed ... */
5087 5088 if (connp->conn_recv_ancillary.crb_ipv6_recvrthdr &&
5088 5089 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen,
5089 5090 (ipp->ipp_fields & IPPF_RTHDR),
5090 5091 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) {
5091 5092 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen;
5092 5093 addflag.crb_ipv6_recvrthdr = 1;
5093 5094 if (!ip_allocbuf((void **)&tcp->tcp_rthdr,
5094 5095 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR),
5095 5096 ipp->ipp_rthdr, ipp->ipp_rthdrlen))
5096 5097 return (mp);
5097 5098 }
5098 5099 /* If app asked for dest headers and it has changed ... */
5099 5100 if ((connp->conn_recv_ancillary.crb_ipv6_recvdstopts ||
5100 5101 connp->conn_recv_ancillary.crb_old_ipv6_recvdstopts) &&
5101 5102 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen,
5102 5103 (ipp->ipp_fields & IPPF_DSTOPTS),
5103 5104 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) {
5104 5105 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen;
5105 5106 addflag.crb_ipv6_recvdstopts = 1;
5106 5107 if (!ip_allocbuf((void **)&tcp->tcp_dstopts,
5107 5108 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS),
5108 5109 ipp->ipp_dstopts, ipp->ipp_dstoptslen))
5109 5110 return (mp);
5110 5111 }
5111 5112
5112 5113 if (optlen == 0) {
5113 5114 /* Nothing to add */
5114 5115 return (mp);
5115 5116 }
5116 5117 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED);
5117 5118 if (mp1 == NULL) {
5118 5119 /*
5119 5120 * Defer sending ancillary data until the next TCP segment
5120 5121 * arrives.
5121 5122 */
5122 5123 return (mp);
5123 5124 }
5124 5125 mp1->b_cont = mp;
5125 5126 mp = mp1;
5126 5127 mp->b_wptr += sizeof (*todi) + optlen;
5127 5128 mp->b_datap->db_type = M_PROTO;
5128 5129 todi = (struct T_optdata_ind *)mp->b_rptr;
5129 5130 todi->PRIM_type = T_OPTDATA_IND;
5130 5131 todi->DATA_flag = 1; /* MORE data */
5131 5132 todi->OPT_length = optlen;
5132 5133 todi->OPT_offset = sizeof (*todi);
5133 5134 optptr = (uchar_t *)&todi[1];
5134 5135 /*
5135 5136 * If app asked for pktinfo and the index has changed ...
5136 5137 * Note that the local address never changes for the connection.
5137 5138 */
5138 5139 if (addflag.crb_ip_recvpktinfo) {
5139 5140 struct in6_pktinfo *pkti;
5140 5141 uint_t ifindex;
5141 5142
5142 5143 ifindex = ira->ira_ruifindex;
5143 5144 toh = (struct T_opthdr *)optptr;
5144 5145 toh->level = IPPROTO_IPV6;
5145 5146 toh->name = IPV6_PKTINFO;
5146 5147 toh->len = sizeof (*toh) + sizeof (*pkti);
5147 5148 toh->status = 0;
5148 5149 optptr += sizeof (*toh);
5149 5150 pkti = (struct in6_pktinfo *)optptr;
5150 5151 pkti->ipi6_addr = connp->conn_laddr_v6;
5151 5152 pkti->ipi6_ifindex = ifindex;
5152 5153 optptr += sizeof (*pkti);
5153 5154 ASSERT(OK_32PTR(optptr));
5154 5155 /* Save as "last" value */
5155 5156 tcp->tcp_recvifindex = ifindex;
5156 5157 }
5157 5158 /* If app asked for hoplimit and it has changed ... */
5158 5159 if (addflag.crb_ipv6_recvhoplimit) {
5159 5160 toh = (struct T_opthdr *)optptr;
5160 5161 toh->level = IPPROTO_IPV6;
5161 5162 toh->name = IPV6_HOPLIMIT;
5162 5163 toh->len = sizeof (*toh) + sizeof (uint_t);
5163 5164 toh->status = 0;
5164 5165 optptr += sizeof (*toh);
5165 5166 *(uint_t *)optptr = ipp->ipp_hoplimit;
5166 5167 optptr += sizeof (uint_t);
5167 5168 ASSERT(OK_32PTR(optptr));
5168 5169 /* Save as "last" value */
5169 5170 tcp->tcp_recvhops = ipp->ipp_hoplimit;
5170 5171 }
5171 5172 /* If app asked for tclass and it has changed ... */
5172 5173 if (addflag.crb_ipv6_recvtclass) {
5173 5174 toh = (struct T_opthdr *)optptr;
5174 5175 toh->level = IPPROTO_IPV6;
5175 5176 toh->name = IPV6_TCLASS;
5176 5177 toh->len = sizeof (*toh) + sizeof (uint_t);
5177 5178 toh->status = 0;
5178 5179 optptr += sizeof (*toh);
5179 5180 *(uint_t *)optptr = ipp->ipp_tclass;
5180 5181 optptr += sizeof (uint_t);
5181 5182 ASSERT(OK_32PTR(optptr));
5182 5183 /* Save as "last" value */
5183 5184 tcp->tcp_recvtclass = ipp->ipp_tclass;
5184 5185 }
5185 5186 if (addflag.crb_ipv6_recvhopopts) {
5186 5187 toh = (struct T_opthdr *)optptr;
5187 5188 toh->level = IPPROTO_IPV6;
5188 5189 toh->name = IPV6_HOPOPTS;
5189 5190 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen;
5190 5191 toh->status = 0;
5191 5192 optptr += sizeof (*toh);
5192 5193 bcopy((uchar_t *)ipp->ipp_hopopts, optptr, ipp->ipp_hopoptslen);
5193 5194 optptr += ipp->ipp_hopoptslen;
5194 5195 ASSERT(OK_32PTR(optptr));
5195 5196 /* Save as last value */
5196 5197 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen,
5197 5198 (ipp->ipp_fields & IPPF_HOPOPTS),
5198 5199 ipp->ipp_hopopts, ipp->ipp_hopoptslen);
5199 5200 }
5200 5201 if (addflag.crb_ipv6_recvrthdrdstopts) {
5201 5202 toh = (struct T_opthdr *)optptr;
5202 5203 toh->level = IPPROTO_IPV6;
5203 5204 toh->name = IPV6_RTHDRDSTOPTS;
5204 5205 toh->len = sizeof (*toh) + ipp->ipp_rthdrdstoptslen;
5205 5206 toh->status = 0;
5206 5207 optptr += sizeof (*toh);
5207 5208 bcopy(ipp->ipp_rthdrdstopts, optptr, ipp->ipp_rthdrdstoptslen);
5208 5209 optptr += ipp->ipp_rthdrdstoptslen;
5209 5210 ASSERT(OK_32PTR(optptr));
5210 5211 /* Save as last value */
5211 5212 ip_savebuf((void **)&tcp->tcp_rthdrdstopts,
5212 5213 &tcp->tcp_rthdrdstoptslen,
5213 5214 (ipp->ipp_fields & IPPF_RTHDRDSTOPTS),
5214 5215 ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
5215 5216 }
5216 5217 if (addflag.crb_ipv6_recvrthdr) {
5217 5218 toh = (struct T_opthdr *)optptr;
5218 5219 toh->level = IPPROTO_IPV6;
5219 5220 toh->name = IPV6_RTHDR;
5220 5221 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen;
5221 5222 toh->status = 0;
5222 5223 optptr += sizeof (*toh);
5223 5224 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen);
5224 5225 optptr += ipp->ipp_rthdrlen;
5225 5226 ASSERT(OK_32PTR(optptr));
5226 5227 /* Save as last value */
5227 5228 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen,
5228 5229 (ipp->ipp_fields & IPPF_RTHDR),
5229 5230 ipp->ipp_rthdr, ipp->ipp_rthdrlen);
5230 5231 }
5231 5232 if (addflag.crb_ipv6_recvdstopts) {
5232 5233 toh = (struct T_opthdr *)optptr;
5233 5234 toh->level = IPPROTO_IPV6;
5234 5235 toh->name = IPV6_DSTOPTS;
5235 5236 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen;
5236 5237 toh->status = 0;
5237 5238 optptr += sizeof (*toh);
5238 5239 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen);
5239 5240 optptr += ipp->ipp_dstoptslen;
5240 5241 ASSERT(OK_32PTR(optptr));
5241 5242 /* Save as last value */
5242 5243 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen,
5243 5244 (ipp->ipp_fields & IPPF_DSTOPTS),
5244 5245 ipp->ipp_dstopts, ipp->ipp_dstoptslen);
5245 5246 }
5246 5247 ASSERT(optptr == mp->b_wptr);
5247 5248 return (mp);
5248 5249 }
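
The option area built above is a packed sequence of struct T_opthdr TLVs, each header immediately followed by its 32-bit-aligned value, with optptr advanced by toh->len each time. A portable sketch of walking such a buffer; opthdr_sketch is a local mirror of the T_opthdr layout (four 32-bit fields), and the level/name values in main() are hypothetical:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Local mirror of the struct T_opthdr layout from <sys/tihdr.h>. */
struct opthdr_sketch {
	uint32_t len;		/* header + value length, in bytes */
	uint32_t level;		/* e.g. IPPROTO_IPV6 */
	uint32_t name;		/* e.g. IPV6_HOPLIMIT */
	uint32_t status;
};

/* Walk the TLVs, printing each option's level, name and value size. */
static void
walk_opts(const unsigned char *optptr, size_t optlen)
{
	const unsigned char *end = optptr + optlen;

	while (optptr + sizeof (struct opthdr_sketch) <= end) {
		struct opthdr_sketch toh;

		memcpy(&toh, optptr, sizeof (toh));
		if (toh.len < sizeof (toh) || optptr + toh.len > end)
			break;		/* malformed TLV */
		printf("level %u name %u value %u bytes\n",
		    (unsigned)toh.level, (unsigned)toh.name,
		    (unsigned)(toh.len - sizeof (toh)));
		optptr += toh.len;	/* advance to the next TLV */
	}
}

int
main(void)
{
	unsigned char buf[sizeof (struct opthdr_sketch) + sizeof (uint32_t)];
	struct opthdr_sketch toh = {
		sizeof (toh) + sizeof (uint32_t), 41, 58, 0  /* hypothetical */
	};
	uint32_t hoplimit = 64;

	memcpy(buf, &toh, sizeof (toh));
	memcpy(buf + sizeof (toh), &hoplimit, sizeof (hoplimit));
	walk_opts(buf, sizeof (buf));	/* "level 41 name 58 value 4 bytes" */
	return (0);
}
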
5249 5250
5250 5251 /* The minimum smoothed mean deviation used in the RTO calculation. */
5251 5252 #define TCP_SD_MIN 400
5252 5253
5253 5254 /*
5254 5255 * Set RTO for this connection. The formula is from Jacobson and Karels'
5255 5256 * "Congestion Avoidance and Control" in SIGCOMM '88. The variable names
5256 5257 * are the same as those in Appendix A.2 of that paper.
5257 5258 *
5258 5259 * m = new measurement
5259 5260 * sa = smoothed RTT average (8 * average estimates).
5260 5261 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates).
5261 5262 */
5262 5263 static void
5263 5264 tcp_set_rto(tcp_t *tcp, clock_t rtt)
5264 5265 {
5265 5266 long m = TICK_TO_MSEC(rtt);
5266 5267 clock_t sa = tcp->tcp_rtt_sa;
5267 5268 clock_t sv = tcp->tcp_rtt_sd;
5268 5269 clock_t rto;
5269 5270 tcp_stack_t *tcps = tcp->tcp_tcps;
5270 5271
5271 5272 TCPS_BUMP_MIB(tcps, tcpRttUpdate);
5272 5273 tcp->tcp_rtt_update++;
5273 5274
5274 5275 /* A non-zero tcp_rtt_sa means we already have an estimate to update. */
5275 5276 if (sa != 0) {
5276 5277 /*
5277 5278 * Update average estimator:
5278 5279 * new srtt = old srtt + 1/8 Error = 7/8 old srtt + 1/8 m
5279 5280 */
5280 5281
5281 5282 /* m is now the error in the estimate. */
5282 5283 m -= sa >> 3;
5283 5284 if ((sa += m) <= 0) {
5284 5285 /*
5285 5286 * Don't allow the smoothed average to be negative.
5286 5287 * We use 0 to denote reinitialization of the
5287 5288 * variables.
5288 5289 */
5289 5290 sa = 1;
5290 5291 }
5291 5292
5292 5293 /*
5293 5294 * Update deviation estimator:
5294 5295 * new mdev = old mdev + 1/4 (abs(Error) - old mdev) = 3/4 old mdev + 1/4 abs(Error)
5295 5296 */
5296 5297 if (m < 0)
5297 5298 m = -m;
5298 5299 m -= sv >> 2;
5299 5300 sv += m;
5300 5301 } else {
5301 5302 /*
5302 5303 * This follows BSD's implementation, so the reinitialized
5303 5304 * RTO is 3 * m. We cannot use a multiplier smaller than 2
5304 5305 * because, if the link is bandwidth dominated, doubling the
5305 5306 * window size during slow start means doubling the RTT. We
5306 5307 * want to be more conservative when we reinitialize our
5307 5308 * estimates; 3 is just a convenient number.
5308 5309 */
5309 5310 sa = m << 3;
5310 5311 sv = m << 1;
5311 5312 }
5312 5313 if (sv < TCP_SD_MIN) {
5313 5314 /*
5314 5315 * We do not know whether sa captures the delayed-ACK
5315 5316 * effect: in a long train of segments, a receiver
5316 5317 * does not delay its ACKs. So set the minimum of sv
5317 5318 * to TCP_SD_MIN, which defaults to 400 ms, twice the
5318 5319 * BSD DATO. That means the minimum mean deviation
5319 5320 * is 100 ms.
5321 5322 */
5322 5323 sv = TCP_SD_MIN;
5323 5324 }
5324 5325 tcp->tcp_rtt_sa = sa;
5325 5326 tcp->tcp_rtt_sd = sv;
5326 5327 /*
5327 5328 * RTO = smoothed RTT average (sa / 8) + smoothed mean deviation
5328 5329 * (sv already holds 4 * mdev, so it is added in directly).
5329 5330 *
5330 5331 * Add tcps_rexmit_interval_extra in case of an extreme environment
5331 5332 * where the algorithm fails to work. The default value of
5332 5333 * tcps_rexmit_interval_extra should be 0.
5333 5334 *
5334 5335 * As we use a finer-grained clock than BSD and update the
5335 5336 * RTO on every ACK, add another .25 of the RTT to the deviation
5336 5337 * term to accommodate burstiness of 1/4 of the window size.
5337 5338 */
5338 5339 rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5);
5339 5340
5340 5341 TCP_SET_RTO(tcp, rto);
5341 5342
5342 5343 /* Now, we can reset tcp_timer_backoff to use the new RTO... */
5343 5344 tcp->tcp_timer_backoff = 0;
5344 5345 }
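
tcp_set_rto() keeps sa scaled to 8x the smoothed RTT and sv to 4x the mean deviation, so the >> 3 and >> 2 shifts implement the 1/8 and 1/4 gains in integer arithmetic and the final RTO is sa/8 + sv. A standalone sketch of the same fixed-point update; the TCP_SD_MIN clamp, tcps_rexmit_interval_extra, and the extra sa/32 burstiness term are left out, and the millisecond inputs are hypothetical:

#include <stdio.h>

static long sa, sv;	/* sa = 8 * srtt, sv = 4 * mdev; 0 = uninitialized */

/* One Jacobson/Karels update; returns the resulting RTO in ms. */
static long
set_rto_sketch(long m)
{
	if (sa != 0) {
		m -= sa >> 3;		/* m is now the error term */
		if ((sa += m) <= 0)
			sa = 1;		/* keep 0 reserved for "reinit" */
		if (m < 0)
			m = -m;
		m -= sv >> 2;
		sv += m;
	} else {
		sa = m << 3;		/* first sample: srtt = m */
		sv = m << 1;		/* so the first RTO is m + 2m = 3m */
	}
	return ((sa >> 3) + sv);
}

int
main(void)
{
	printf("%ld\n", set_rto_sketch(100));	/* 300: reinitialized RTO */
	printf("%ld\n", set_rto_sketch(100));	/* 250: deviation decays */
	return (0);
}
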
5345 5346
5346 5347 /*
5347 5348 * On a labeled system we have some protocols above TCP, such as RPC, which
5348 5349 * appear to assume that every mblk in a chain has a db_credp.
5349 5350 */
5350 5351 static void
5351 5352 tcp_setcred_data(mblk_t *mp, ip_recv_attr_t *ira)
5352 5353 {
5353 5354 ASSERT(is_system_labeled());
5354 5355 ASSERT(ira->ira_cred != NULL);
5355 5356
5356 5357 while (mp != NULL) {
5357 5358 mblk_setcred(mp, ira->ira_cred, NOPID);
5358 5359 mp = mp->b_cont;
5359 5360 }
5360 5361 }
5361 5362
5362 5363 uint_t
5363 5364 tcp_rwnd_reopen(tcp_t *tcp)
5364 5365 {
5365 5366 uint_t ret = 0;
5366 5367 uint_t thwin;
5367 5368 conn_t *connp = tcp->tcp_connp;
5368 5369
5369 5370 /* Learn the latest rwnd information that we sent to the other side. */
5370 5371 thwin = ((uint_t)ntohs(tcp->tcp_tcpha->tha_win))
5371 5372 << tcp->tcp_rcv_ws;
5372 5373 /* This is the peer's calculated send window (our receive window). */
5373 5374 thwin -= tcp->tcp_rnxt - tcp->tcp_rack;
5374 5375 /*
5375 5376 * Increase the receive window to the maximum. But we need to do
5376 5377 * receiver SWS avoidance: check that the increase of the receive
5377 5378 * window is at least 1 MSS.
5378 5379 */
5379 5380 if (connp->conn_rcvbuf - thwin >= tcp->tcp_mss) {
5380 5381 /*
5381 5382 * If the window the other side knows about is smaller than
5382 5383 * the deferred-ACK maximum (in segments), send an update now.
5383 5384 */
5384 5385 if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) {
5385 5386 TCPS_BUMP_MIB(tcp->tcp_tcps, tcpOutWinUpdate);
5386 5387 ret = TH_ACK_NEEDED;
5387 5388 }
5388 5389 tcp->tcp_rwnd = connp->conn_rcvbuf;
5389 5390 }
5390 5391 return (ret);
5391 5392 }
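
A worked sketch of the receiver SWS-avoidance test above: reopen the window only when it can grow by at least one MSS, and force an immediate window update only when the window the peer currently knows about has fallen below tcp_rack_cur_max segments. rwnd_update_needed() and all numbers are hypothetical:

#include <stdio.h>
#include <stdint.h>

static int
rwnd_update_needed(uint32_t rcvbuf, uint32_t thwin, uint32_t mss,
    uint32_t rack_cur_max)
{
	if (rcvbuf - thwin < mss)
		return (0);		/* gain too small: stay silent */
	return (thwin < rack_cur_max * mss);	/* ACK immediately? */
}

int
main(void)
{
	/* 64 KB buffer, peer knows only 4 KB, MSS 1460, max 8 segments. */
	printf("%d\n", rwnd_update_needed(65536, 4096, 1460, 8));	/* 1 */
	/* Peer already knows about 32 KB: reopen quietly, no update. */
	printf("%d\n", rwnd_update_needed(65536, 32768, 1460, 8));	/* 0 */
	return (0);
}
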
5392 5393
5393 5394 /*
5394 5395 * Handle a packet that has been reclassified by TCP.
5395 5396 * This function drops the ref on connp that the caller had.
5396 5397 */
5397 5398 void
5398 5399 tcp_reinput(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira, ip_stack_t *ipst)
5399 5400 {
5400 5401 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
5401 5402
5402 5403 if (connp->conn_incoming_ifindex != 0 &&
5403 5404 connp->conn_incoming_ifindex != ira->ira_ruifindex) {
5404 5405 freemsg(mp);
5405 5406 CONN_DEC_REF(connp);
5406 5407 return;
5407 5408 }
5408 5409
5409 5410 if (CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss) ||
5410 5411 (ira->ira_flags & IRAF_IPSEC_SECURE)) {
5411 5412 ip6_t *ip6h;
5412 5413 ipha_t *ipha;
5413 5414
5414 5415 if (ira->ira_flags & IRAF_IS_IPV4) {
5415 5416 ipha = (ipha_t *)mp->b_rptr;
5416 5417 ip6h = NULL;
5417 5418 } else {
5418 5419 ipha = NULL;
5419 5420 ip6h = (ip6_t *)mp->b_rptr;
5420 5421 }
5421 5422 mp = ipsec_check_inbound_policy(mp, connp, ipha, ip6h, ira);
5422 5423 if (mp == NULL) {
5423 5424 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards);
5424 5425 /* Note that mp is NULL */
5425 5426 ip_drop_input("ipIfStatsInDiscards", mp, NULL);
5426 5427 CONN_DEC_REF(connp);
5427 5428 return;
5428 5429 }
5429 5430 }
5430 5431
5431 5432 if (IPCL_IS_TCP(connp)) {
5432 5433 /*
5433 5434 * Do not drain; certain use cases can blow
5434 5435 * the stack.
5435 5436 */
5436 5437 SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
5437 5438 connp->conn_recv, connp, ira,
5438 5439 SQ_NODRAIN, SQTAG_IP_TCP_INPUT);
5439 5440 } else {
5440 5441 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */
5441 5442 (connp->conn_recv)(connp, mp, NULL,
5442 5443 ira);
5443 5444 CONN_DEC_REF(connp);
5444 5445 }
5445 5446
5446 5447 }
5447 5448
5448 5449 /* ARGSUSED */
5449 5450 static void
5450 5451 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
5451 5452 {
5452 5453 conn_t *connp = (conn_t *)arg;
5453 5454 tcp_t *tcp = connp->conn_tcp;
5454 5455 queue_t *q = connp->conn_rq;
5455 5456
5456 5457 ASSERT(!IPCL_IS_NONSTR(connp));
5457 5458 mutex_enter(&tcp->tcp_rsrv_mp_lock);
5458 5459 tcp->tcp_rsrv_mp = mp;
5459 5460 mutex_exit(&tcp->tcp_rsrv_mp_lock);
5460 5461
5461 5462 if (TCP_IS_DETACHED(tcp) || q == NULL) {
5462 5463 return;
5463 5464 }
5464 5465
5465 5466 if (tcp->tcp_fused) {
5466 5467 tcp_fuse_backenable(tcp);
5467 5468 return;
5468 5469 }
5469 5470
5470 5471 if (canputnext(q)) {
5471 5472 /* Not flow-controlled, open rwnd */
5472 5473 tcp->tcp_rwnd = connp->conn_rcvbuf;
5473 5474
5474 5475 /*
5475 5476 * Send back a window update immediately if TCP is above
5476 5477 * ESTABLISHED state and the increase of the rcv window
5477 5478 * that the other side knows is at least 1 MSS after flow
5478 5479 * control is lifted.
5479 5480 */
5480 5481 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
5481 5482 tcp_rwnd_reopen(tcp) == TH_ACK_NEEDED) {
5482 5483 tcp_xmit_ctl(NULL, tcp,
5483 5484 (tcp->tcp_swnd == 0) ? tcp->tcp_suna :
5484 5485 tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
5485 5486 }
5486 5487 }
5487 5488 }
5488 5489
5489 5490 /*
5490 5491 * The read-side service routine is called mostly when we get back-enabled
5491 5492 * as a result of flow-control relief. Since we don't actually queue
5492 5493 * anything in TCP, we have no data to send out of here. What we do is
5493 5494 * reopen the receive window and send out a window update.
5494 5495 */
5495 5496 void
5496 5497 tcp_rsrv(queue_t *q)
5497 5498 {
5498 5499 conn_t *connp = Q_TO_CONN(q);
5499 5500 tcp_t *tcp = connp->conn_tcp;
5500 5501 mblk_t *mp;
5501 5502
5502 5503 /* No code does a putq on the read side */
5503 5504 ASSERT(q->q_first == NULL);
5504 5505
5505 5506 /*
5506 5507 * If tcp->tcp_rsrv_mp == NULL, it means that tcp_rsrv() has already
5507 5508 * been run. So just return.
5508 5509 */
5509 5510 mutex_enter(&tcp->tcp_rsrv_mp_lock);
5510 5511 if ((mp = tcp->tcp_rsrv_mp) == NULL) {
5511 5512 mutex_exit(&tcp->tcp_rsrv_mp_lock);
5512 5513 return;
5513 5514 }
5514 5515 tcp->tcp_rsrv_mp = NULL;
5515 5516 mutex_exit(&tcp->tcp_rsrv_mp_lock);
5516 5517
5517 5518 CONN_INC_REF(connp);
5518 5519 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_rsrv_input, connp,
5519 5520 NULL, SQ_PROCESS, SQTAG_TCP_RSRV);
5520 5521 }
5521 5522
5522 5523 /* At minimum we need 8 bytes in the TCP header for the lookup */
5523 5524 #define ICMP_MIN_TCP_HDR 8
5524 5525
5525 5526 /*
5526 5527 * tcp_icmp_input is called as conn_recvicmp to process ICMP error messages
5527 5528 * passed up by IP. The message is always received on the correct tcp_t.
5528 5529 * Assumes that IP has pulled up everything up to and including the ICMP header.
5529 5530 */
5530 5531 /* ARGSUSED2 */
5531 5532 void
5532 5533 tcp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
5533 5534 {
5534 5535 conn_t *connp = (conn_t *)arg1;
5535 5536 icmph_t *icmph;
5536 5537 ipha_t *ipha;
5537 5538 int iph_hdr_length;
5538 5539 tcpha_t *tcpha;
5539 5540 uint32_t seg_seq;
5540 5541 tcp_t *tcp = connp->conn_tcp;
5541 5542
5542 5543 /* Assume IP provides aligned packets */
5543 5544 ASSERT(OK_32PTR(mp->b_rptr));
5544 5545 ASSERT((MBLKL(mp) >= sizeof (ipha_t)));
5545 5546
5546 5547 /*
5547 5548 * It's possible we have a closed, but not yet destroyed, TCP
5548 5549 * connection. Several fields (e.g. conn_ixa->ixa_ire) are invalid
5549 5550 * in the closed state, so don't take any chances and drop the packet.
5550 5551 */
5551 5552 if (tcp->tcp_state == TCPS_CLOSED) {
5552 5553 freemsg(mp);
5553 5554 return;
5554 5555 }
5555 5556
5556 5557 /*
5557 5558 * Verify the IP version: ICMPv6 errors are handled in
5558 5559 * tcp_icmp_error_ipv6; anything reaching this point is IPv4.
5559 5560 */
5560 5561 if (!(ira->ira_flags & IRAF_IS_IPV4)) {
5561 5562 tcp_icmp_error_ipv6(tcp, mp, ira);
5562 5563 return;
5563 5564 }
5564 5565
5565 5566 /* Skip past the outer IP and ICMP headers */
5566 5567 iph_hdr_length = ira->ira_ip_hdr_length;
5567 5568 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
5568 5569 /*
5569 5570 * If we don't have the correct outer IP header length,
5570 5571 * or if we don't have a complete inner IP header,
5571 5572 * drop the message.
5572 5573 */
5573 5574 if (iph_hdr_length < sizeof (ipha_t) ||
5574 5575 (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) {
5575 5576 noticmpv4:
5576 5577 freemsg(mp);
5577 5578 return;
5578 5579 }
5579 5580 ipha = (ipha_t *)&icmph[1];
5580 5581
5581 5582 /* Skip past the inner IP and find the ULP header */
5582 5583 iph_hdr_length = IPH_HDR_LENGTH(ipha);
5583 5584 tcpha = (tcpha_t *)((char *)ipha + iph_hdr_length);
5584 5585 /*
5585 5586 * If we don't have the correct inner IP header length or if the ULP
5586 5587 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR
5587 5588 * bytes of TCP header, drop it.
5588 5589 */
5589 5590 if (iph_hdr_length < sizeof (ipha_t) ||
5590 5591 ipha->ipha_protocol != IPPROTO_TCP ||
5591 5592 (uchar_t *)tcpha + ICMP_MIN_TCP_HDR > mp->b_wptr) {
5592 5593 goto noticmpv4;
5593 5594 }
5594 5595
5595 5596 seg_seq = ntohl(tcpha->tha_seq);
5596 5597 switch (icmph->icmph_type) {
5597 5598 case ICMP_DEST_UNREACHABLE:
5598 5599 switch (icmph->icmph_code) {
5599 5600 case ICMP_FRAGMENTATION_NEEDED:
5600 5601 /*
5601 5602 * Update Path MTU, then try to send something out.
5602 5603 */
5603 5604 tcp_update_pmtu(tcp, B_TRUE);
5604 5605 tcp_rexmit_after_error(tcp);
5605 5606 break;
5606 5607 case ICMP_PORT_UNREACHABLE:
5607 5608 case ICMP_PROTOCOL_UNREACHABLE:
5608 5609 switch (tcp->tcp_state) {
5609 5610 case TCPS_SYN_SENT:
5610 5611 case TCPS_SYN_RCVD:
5611 5612 /*
5612 5613 * ICMP can snipe away incipient
5613 5614 * TCP connections as long as
5614 5615 * the seq number is the same as the
5615 5616 * initial send seq number.
5616 5617 */
5617 5618 if (seg_seq == tcp->tcp_iss) {
5618 5619 (void) tcp_clean_death(tcp,
5619 5620 ECONNREFUSED);
5620 5621 }
5621 5622 break;
5622 5623 }
5623 5624 break;
5624 5625 case ICMP_HOST_UNREACHABLE:
5625 5626 case ICMP_NET_UNREACHABLE:
5626 5627 /* Record the error in case we finally time out. */
5627 5628 if (icmph->icmph_code == ICMP_HOST_UNREACHABLE)
5628 5629 tcp->tcp_client_errno = EHOSTUNREACH;
5629 5630 else
5630 5631 tcp->tcp_client_errno = ENETUNREACH;
5631 5632 if (tcp->tcp_state == TCPS_SYN_RCVD) {
5632 5633 if (tcp->tcp_listener != NULL &&
5633 5634 tcp->tcp_listener->tcp_syn_defense) {
5634 5635 /*
5635 5636 * Ditch the half-open connection if we
5636 5637 * suspect a SYN attack is under way.
5637 5638 */
5638 5639 (void) tcp_clean_death(tcp,
5639 5640 tcp->tcp_client_errno);
5640 5641 }
5641 5642 }
5642 5643 break;
5643 5644 default:
5644 5645 break;
5645 5646 }
5646 5647 break;
5647 5648 case ICMP_SOURCE_QUENCH: {
5648 5649 /*
5649 5650 * Use a global boolean to control
5650 5651 * whether TCP should respond to ICMP_SOURCE_QUENCH.
5651 5652 * The default is false.
5652 5653 */
5653 5654 if (tcp_icmp_source_quench) {
5654 5655 /*
5655 5656 * Reduce the sending rate as if we got a
5656 5657 * retransmit timeout
5657 5658 */
5658 5659 uint32_t npkt;
5659 5660
5660 5661 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) /
5661 5662 tcp->tcp_mss;
5662 5663 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss;
5663 5664 tcp->tcp_cwnd = tcp->tcp_mss;
5664 5665 tcp->tcp_cwnd_cnt = 0;
5665 5666 }
5666 5667 break;
5667 5668 }
5668 5669 }
5669 5670 freemsg(mp);
5670 5671 }
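
When tcp_icmp_source_quench is enabled, the reaction above mirrors a retransmit timeout: ssthresh becomes half the data in flight, floored at two segments, and cwnd collapses to a single MSS. A worked sketch with hypothetical values:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t mss = 1460;
	uint32_t snxt = 100000, suna = 80000;	/* 20000 bytes in flight */
	uint32_t npkt = ((snxt - suna) >> 1) / mss;	/* 10000 / 1460 = 6 */
	uint32_t ssthresh = (npkt > 2 ? npkt : 2) * mss; /* MAX(npkt, 2) */
	uint32_t cwnd = mss;			/* back to one segment */

	printf("ssthresh %u cwnd %u\n",
	    (unsigned)ssthresh, (unsigned)cwnd);	/* 8760 and 1460 */
	return (0);
}
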
5671 5672
5672 5673 /*
5673 5674 * tcp_icmp_error_ipv6 is called from tcp_icmp_input to process ICMPv6
5674 5675 * error messages passed up by IP.
5675 5676 * Assumes that IP has pulled up all the extension headers as well
5676 5677 * as the ICMPv6 header.
5677 5678 */
5678 5679 static void
5679 5680 tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, ip_recv_attr_t *ira)
5680 5681 {
5681 5682 icmp6_t *icmp6;
5682 5683 ip6_t *ip6h;
5683 5684 uint16_t iph_hdr_length = ira->ira_ip_hdr_length;
5684 5685 tcpha_t *tcpha;
5685 5686 uint8_t *nexthdrp;
5686 5687 uint32_t seg_seq;
5687 5688
5688 5689 /*
5689 5690 * Verify that we have a complete IP header.
5690 5691 */
5691 5692 ASSERT((MBLKL(mp) >= sizeof (ip6_t)));
5692 5693
5693 5694 icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
5694 5695 ip6h = (ip6_t *)&icmp6[1];
5695 5696 /*
5696 5697 * Verify that we have a complete ICMP and inner IP header.
5697 5698 */
5698 5699 if ((uchar_t *)&ip6h[1] > mp->b_wptr) {
5699 5700 noticmpv6:
5700 5701 freemsg(mp);
5701 5702 return;
5702 5703 }
5703 5704
5704 5705 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp))
5705 5706 goto noticmpv6;
5706 5707 tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length);
5707 5708 /*
5708 5709 * Validate inner header. If the ULP is not IPPROTO_TCP or if we don't
5709 5710 * have at least ICMP_MIN_TCP_HDR bytes of TCP header, drop the
5710 5711 * packet.
5711 5712 */
5712 5713 if ((*nexthdrp != IPPROTO_TCP) ||
5713 5714 ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) {
5714 5715 goto noticmpv6;
5715 5716 }
5716 5717
5717 5718 seg_seq = ntohl(tcpha->tha_seq);
5718 5719 switch (icmp6->icmp6_type) {
5719 5720 case ICMP6_PACKET_TOO_BIG:
5720 5721 /*
5721 5722 * Update Path MTU, then try to send something out.
5722 5723 */
5723 5724 tcp_update_pmtu(tcp, B_TRUE);
5724 5725 tcp_rexmit_after_error(tcp);
5725 5726 break;
5726 5727 case ICMP6_DST_UNREACH:
5727 5728 switch (icmp6->icmp6_code) {
5728 5729 case ICMP6_DST_UNREACH_NOPORT:
5729 5730 if (((tcp->tcp_state == TCPS_SYN_SENT) ||
5730 5731 (tcp->tcp_state == TCPS_SYN_RCVD)) &&
5731 5732 (seg_seq == tcp->tcp_iss)) {
5732 5733 (void) tcp_clean_death(tcp, ECONNREFUSED);
5733 5734 }
5734 5735 break;
5735 5736 case ICMP6_DST_UNREACH_ADMIN:
5736 5737 case ICMP6_DST_UNREACH_NOROUTE:
5737 5738 case ICMP6_DST_UNREACH_BEYONDSCOPE:
5738 5739 case ICMP6_DST_UNREACH_ADDR:
5739 5740 /* Record the error in case we finally time out. */
5740 5741 tcp->tcp_client_errno = EHOSTUNREACH;
5741 5742 if (((tcp->tcp_state == TCPS_SYN_SENT) ||
5742 5743 (tcp->tcp_state == TCPS_SYN_RCVD)) &&
5743 5744 (seg_seq == tcp->tcp_iss)) {
5744 5745 if (tcp->tcp_listener != NULL &&
5745 5746 tcp->tcp_listener->tcp_syn_defense) {
5746 5747 /*
5747 5748 * Ditch the half-open connection if we
5748 5749 * suspect a SYN attack is under way.
5749 5750 */
5750 5751 (void) tcp_clean_death(tcp,
5751 5752 tcp->tcp_client_errno);
5752 5753 }
5753 5754 }
5754 5755
5756 5757 break;
5757 5758 default:
5758 5759 break;
5759 5760 }
5760 5761 break;
5761 5762 case ICMP6_PARAM_PROB:
5762 5763 /* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
5763 5764 if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
5764 5765 (uchar_t *)ip6h + icmp6->icmp6_pptr ==
5765 5766 (uchar_t *)nexthdrp) {
5766 5767 if (tcp->tcp_state == TCPS_SYN_SENT ||
5767 5768 tcp->tcp_state == TCPS_SYN_RCVD) {
5768 5769 (void) tcp_clean_death(tcp, ECONNREFUSED);
5769 5770 }
5770 5771 break;
5771 5772 }
5772 5773 break;
5773 5774
5774 5775 case ICMP6_TIME_EXCEEDED:
5775 5776 default:
5776 5777 break;
5777 5778 }
5778 5779 freemsg(mp);
5779 5780 }
5780 5781
5781 5782 /*
5782 5783 * CALLED OUTSIDE OF SQUEUE! It cannot follow any pointers that tcp might
5783 5784 * change. But it can refer to fields like tcp_suna and tcp_snxt.
5784 5785 *
5785 5786 * Function tcp_verifyicmp is called as conn_verifyicmp to verify the ICMP
5786 5787 * error messages received by IP. The message is always received on the correct
5787 5788 * tcp_t.
5788 5789 */
5789 5790 /* ARGSUSED */
5790 5791 boolean_t
5791 5792 tcp_verifyicmp(conn_t *connp, void *arg2, icmph_t *icmph, icmp6_t *icmp6,
5792 5793 ip_recv_attr_t *ira)
5793 5794 {
5794 5795 tcpha_t *tcpha = (tcpha_t *)arg2;
5795 5796 uint32_t seq = ntohl(tcpha->tha_seq);
5796 5797 tcp_t *tcp = connp->conn_tcp;
5797 5798
5798 5799 /*
5799 5800 * The TCP sequence number in the payload of the ICMP error message
5800 5801 * should be within the range SND.UNA <= SEG.SEQ < SND.NXT. Otherwise,
5801 5802 * the message is either a stale ICMP error, or an attack from the
5802 5803 * network. Fail the verification.
5803 5804 */
5804 5805 if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt))
5805 5806 return (B_FALSE);
5806 5807
5807 5808 /* For "too big" we also check the ignore flag */
5808 5809 if (ira->ira_flags & IRAF_IS_IPV4) {
5809 5810 ASSERT(icmph != NULL);
5810 5811 if (icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
5811 5812 icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED &&
5812 5813 tcp->tcp_tcps->tcps_ignore_path_mtu)
5813 5814 return (B_FALSE);
5814 5815 } else {
5815 5816 ASSERT(icmp6 != NULL);
5816 5817 if (icmp6->icmp6_type == ICMP6_PACKET_TOO_BIG &&
5817 5818 tcp->tcp_tcps->tcps_ignore_path_mtu)
5818 5819 return (B_FALSE);
5819 5820 }
5820 5821 return (B_TRUE);
5821 5822 }
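
The SEQ_LT/SEQ_GEQ comparisons above use modular 32-bit arithmetic, so the SND.UNA <= SEG.SEQ < SND.NXT test stays correct even when the sequence space wraps. A standalone sketch, with the macros written out in their conventional signed-difference form:

#include <stdio.h>
#include <stdint.h>

#define	SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)
#define	SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

/* Mirror of the verification above: reject stale or forged sequences. */
static int
seq_in_send_window(uint32_t seq, uint32_t suna, uint32_t snxt)
{
	return (!(SEQ_LT(seq, suna) || SEQ_GEQ(seq, snxt)));
}

int
main(void)
{
	/* A send window straddling the 2^32 wrap. */
	uint32_t suna = 0xfffffff0U, snxt = 0x00000010U;

	printf("%d\n", seq_in_send_window(0xfffffff8U, suna, snxt)); /* 1 */
	printf("%d\n", seq_in_send_window(0x00000020U, suna, snxt)); /* 0 */
	return (0);
}
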
3860 lines elided