亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關于我們
? 蟲蟲下載站

?? tcp.h

?? 《嵌入式系統設計與實例開發實驗教材二源碼》Linux內核移植與編譯實驗
?? H
?? 第 1 頁 / 共 4 頁
字號:
 */
/* Initialize the estimate of the peer's MSS (tp->ack.rcv_mss), used by
 * the delayed-ACK machinery: start from the smaller of the advertised
 * and cached MSS, then clamp to at most rcv_wnd/2 and TCP_MIN_RCVMSS,
 * and to at least TCP_MIN_MSS.
 */
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	unsigned int hint = min(tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd/2);
	hint = min(hint, TCP_MIN_RCVMSS);
	hint = max(hint, TCP_MIN_MSS);

	tp->ack.rcv_mss = hint;
}

/* Build tp->pred_flags, the pre-computed 4th word of the expected TCP
 * header (data offset in the top bits, ACK flag, window) used for
 * header prediction.  snd_wnd here is the window already shifted down
 * by the send window scale.
 */
static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

/* Enable header prediction using the current send window. */
static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
}

/* Re-enable the fast path only when it is safe: no out-of-order data
 * queued, a non-zero receive window, receive buffer not over limit,
 * and no urgent data pending.
 */
static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
{
	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->rmem_alloc) < sk->rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window, in which case we return 0.
 */
static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32	__tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to only use the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queueing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.
 * This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/

#define TCPCB_AT_TAIL		(TCPCB_URG)

	__u16		urg_ptr;	/* Valid if URG flag is set.	*/
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

/* Access the TCP control block stored in an skb's cb[] scratch area. */
#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* Walk the write queue from its head up to (but excluding) send_head,
 * i.e. over the packets already sent and eligible for retransmission.
 */
#define for_retrans_queue(skb, sk, tp) \
		for (skb = (sk)->write_queue.next;			\
		     (skb != (tp)->send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->write_queue);	\
		     skb=skb->next)

#include <net/tcp_ecn.h>

/*
 *	Compute minimal free write space needed to queue new packets.
 */
static inline int tcp_min_write_space(struct sock *sk)
{
	return sk->wmem_queued/2;
}
 
/* Free space remaining in the socket send buffer. */
static inline int tcp_wspace(struct sock *sk)
{
	return sk->sndbuf - sk->wmem_queued;
}

/* This determines how many packets are "in the network" to the best
 * In many cases it is conservative, but where detailed
 * information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
{
	return tp->packets_out - tp->left_out + tp->retrans_out;
}

/* Recalculate snd_ssthresh, we want to set it to:
 *
 * 	one half the current congestion window, but no
 *	less than two segments
 */
static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{
	return max(tp->snd_cwnd >> 1U, 2U);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
{
	if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Recompute left_out = sacked_out + lost_out, first capping sacked_out
 * so that left_out can never exceed packets_out (SACK only).
 */
static inline void tcp_sync_left_out(struct tcp_opt *tp)
{
	if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
		tp->sacked_out = tp->packets_out - tp->lost_out;
	tp->left_out = tp->sacked_out + tp->lost_out;
}

extern void tcp_cwnd_application_limited(struct sock *sk);

/* Congestion window validation. (RFC2861) */
static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
{
	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves: track the peak usage and, if the
		 * window has gone unused for a full RTO, let the
		 * application-limited handler shrink it.
		 */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
			tcp_cwnd_application_limited(sk);
	}
}

/* Set slow start threshold and cwnd not falling to slow start */
static inline void __tcp_enter_cwr(struct tcp_opt *tp)
{
	tp->undo_marker = 0;
	tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + 1U);
	tp->snd_cwnd_cnt = 0;
	tp->high_seq = tp->snd_nxt;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	TCP_ECN_queue_cwr(tp);
}

/* Enter the CWR state unless we are already in CWR or a later
 * (more severe) congestion state.
 */
static inline void tcp_enter_cwr(struct tcp_opt *tp)
{
	tp->prior_ssthresh = 0;
	if (tp->ca_state < TCP_CA_CWR) {
		__tcp_enter_cwr(tp);
		tp->ca_state = TCP_CA_CWR;
	}
}

extern __u32 tcp_init_cwnd(struct tcp_opt *tp);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".
 */
static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
{
	return 3;
}

/* Minshall's test: true while the last small segment sent
 * (snd_sml) is still unacknowledged.
 */
static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
{
	return after(tp->snd_sml,tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Remember the end of the most recent sub-MSS segment sent. */
static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

/* Return 0, if packet can be sent now without violating Nagle's rules:
   1. It is full sized.
   2. Or it contains FIN.
   3. Or TCP_NODELAY was set.
   4. Or TCP_CORK is not set, and all sent packets are ACKed.
      With Minshall's modification: all sent small packets are ACKed.
 */

static __inline__ int
tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
		(nonagle == 2 ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}

/* This checks if the data bearing packet SKB (usually tp->send_head)
 * should be put on the wire right now.
 */
static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
				   unsigned cur_mss, int nonagle)
{
	/*	RFC 1122 - section 4.2.3.4
	 *
	 *	We must queue if
	 *
	 *	a) The right edge of this frame exceeds the window
	 *	b) There are packets in flight and we have a small segment
	 *	   [SWS avoidance and Nagle algorithm]
	 *	   (part of SWS is done on packetization)
	 *	   Minshall version sounds: there are no _small_
	 *	   segments in flight. (tcp_nagle_check)
	 *	c) We have too many packets 'in flight'
	 *
	 * 	Don't use the nagle rule for urgent data (or
	 *	for the final FIN -DaveM).
	 *
	 *	Also, Nagle rule does not apply to frames, which
	 *	sit in the middle of queue (they have no chances
	 *	to get new data) and if room at tail of skb is
	 *	not enough to save something seriously (<32 for now).
	 */

	/* Don't be strict about the congestion window for the
	 * final FIN frame.  -DaveM
	 */
	return ((nonagle==1 || tp->urg_mode
		 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
		((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
}

/* Arm the zero-window probe timer when nothing is in flight and no
 * other retransmit-class timer is already pending.
 */
static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
{
	if (!tp->packets_out && !tp->pending)
		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
}

/* True if skb is the last buffer on the socket's write queue. */
static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
{
	return (skb->next == (struct sk_buff*)&sk->write_queue);
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
static __inline__ void __tcp_push_pending_frames(struct sock *sk,
						 struct tcp_opt *tp,
						 unsigned cur_mss,
						 int nonagle)
{
	struct sk_buff *skb = tp->send_head;

	if (skb) {
		/* Nagle only applies to the last frame in the queue;
		 * anything behind it can always go out.
		 */
		if (!tcp_skb_is_last(sk, skb))
			nonagle = 1;
		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
		    tcp_write_xmit(sk, nonagle))
			tcp_check_probe_timer(sk, tp);
	}
	tcp_cwnd_validate(sk, tp);
}

/* Convenience wrapper using the current MSS and socket nonagle mode. */
static __inline__ void tcp_push_pending_frames(struct sock *sk,
					       struct tcp_opt *tp)
{
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk), tp->nonagle);
}

/* True if there is a queued frame at send_head that passes
 * tcp_snd_test() and so could be transmitted immediately.
 */
static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
{
	struct sk_buff *skb = tp->send_head;

	return (skb &&
		tcp_snd_test(tp, skb, tcp_current_mss(sk),
			     tcp_skb_is_last(sk, skb) ? 1 : tp->nonagle));
}

/* Record the sequence number of the segment that last updated the
 * send window (snd_wl1); init and update are identical here.
 */
static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

extern void			tcp_destroy_sock(struct sock *sk);

/*
 * Calculate(/check) TCP checksum
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr, 
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

/* Fold the full-packet checksum; non-zero means the checksum failed. */
static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

/* Verify the checksum unless hardware already vouched for it. */
static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */
static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere.
 * Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	if (tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		/* If the prequeue has grown past the receive buffer,
		 * drain it through the normal backlog receive path.
		 */
		if (tp->ucopy.memory > sk->rcvbuf) {
			struct sk_buff *skb1;

			if (sk->lock.users) BUG();

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(TCPPrequeueDropped);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			/* First packet queued: wake the reader and make
			 * sure a delayed ACK will fire even if the
			 * reader never gets to run.
			 */
			wake_up_interruptible(sk->sleep);
			if (!tcp_ack_scheduled(tp))
				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
		}
		return 1;
	}
	return 0;
}

#undef STATE_TRACE

#ifdef STATE_TRACE
static char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif

/* NOTE(review): this definition is truncated in the visible source --
 * the remainder of tcp_set_state() continues beyond this chunk.
 */
static __inline__ void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TcpCurrEstab);
		break;

	case TCP_CLOSE:
		sk->prot->unhash(sk);
		if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
			tcp_put_port(sk);

?? 快捷鍵說明

復制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號 Ctrl + =
減小字號 Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
无吗不卡中文字幕| 中文字幕在线一区免费| 精品91自产拍在线观看一区| 国产精一品亚洲二区在线视频| 中文字幕一区二区不卡| 美女脱光内衣内裤视频久久影院| 久久久久久日产精品| 日韩视频在线永久播放| 成人免费高清视频在线观看| 亚欧色一区w666天堂| 国产偷国产偷亚洲高清人白洁 | 成+人+亚洲+综合天堂| 婷婷激情综合网| 1区2区3区精品视频| 日韩欧美三级在线| 欧美日韩国产综合一区二区| 国产成人亚洲综合a∨婷婷图片| 日韩高清不卡在线| 一区二区三区欧美亚洲| 欧美国产日韩精品免费观看| 日韩免费高清电影| 欧美乱妇15p| 欧美性猛交xxxx黑人交| 成人精品国产福利| 国产麻豆视频一区二区| 麻豆国产精品官网| 天堂蜜桃91精品| 亚洲影院理伦片| 亚洲手机成人高清视频| 国产欧美精品在线观看| 日韩精品专区在线影院重磅| 91麻豆精品国产自产在线| 91久久久免费一区二区| av一本久道久久综合久久鬼色| 国内久久婷婷综合| 精品一区二区三区免费| 奇米色一区二区三区四区| 亚洲成人激情社区| 亚洲在线视频网站| 一区二区三区蜜桃网| 亚洲日本韩国一区| 亚洲图片你懂的| 亚洲欧美在线观看| 国产精品高清亚洲| 亚洲欧洲韩国日本视频| 一区在线中文字幕| 亚洲欧洲日本在线| 亚洲色图自拍偷拍美腿丝袜制服诱惑麻豆| 久久精品视频一区二区三区| 久久人人爽爽爽人久久久| 久久综合一区二区| 久久久久久久久97黄色工厂| 国产网红主播福利一区二区| 国产欧美精品一区二区色综合朱莉| 日本一区二区三区电影| 午夜婷婷国产麻豆精品| 午夜成人免费电影| 七七婷婷婷婷精品国产| 久久精品国产网站| 国内不卡的二区三区中文字幕| 国产精品一区二区三区99| 国产精品夜夜嗨| 99精品在线观看视频| 欧美怡红院视频| 欧美一级电影网站| 精品sm在线观看| 国产精品第13页| 亚洲图片欧美一区| 蜜桃精品视频在线| 国产suv精品一区二区6| 91免费看片在线观看| 欧美视频你懂的| 精品国产区一区| 国产精品天美传媒沈樵| 亚洲丶国产丶欧美一区二区三区| 久久国产精品一区二区| 丰满亚洲少妇av| 欧美午夜宅男影院| 精品99999| 一区二区三区在线播放| 蜜臀av性久久久久av蜜臀妖精 | 91亚洲午夜精品久久久久久| 欧美日韩在线亚洲一区蜜芽| 欧美不卡在线视频| 亚洲欧美激情一区二区| 日本亚洲天堂网| 成人精品一区二区三区中文字幕| 在线观看视频一区二区欧美日韩| 日韩亚洲国产中文字幕欧美| 中文字幕中文在线不卡住| 日韩精彩视频在线观看| 国产一区二区三区在线观看精品 | 欧美韩国日本综合| 亚洲国产欧美日韩另类综合 | 91免费国产在线| 一区二区三区在线免费观看| 亚洲自拍偷拍麻豆| 不卡欧美aaaaa| 欧美一区二区三区白人 | 午夜精品在线看| 久久99久久精品欧美| 91亚洲精品一区二区乱码| 欧美久久免费观看| 中文字幕中文乱码欧美一区二区| 蜜臀av一区二区| 欧美日韩www| 亚洲裸体xxx| av不卡在线播放| 久久久精品2019中文字幕之3| 亚洲一二三四久久| 丁香桃色午夜亚洲一区二区三区| 日韩精品一区二区三区在线| 久久成人免费电影| 337p日本欧洲亚洲大胆精品| 日韩精品成人一区二区三区| 亚洲电影第三页| 99精品热视频| 中文字幕欧美日韩一区| 在线播放一区二区三区| 欧美视频在线一区二区三区| 色悠久久久久综合欧美99| 91老司机福利 在线| 国产日韩欧美一区二区三区综合| 日本强好片久久久久久aaa| 欧美主播一区二区三区美女| 国产精品盗摄一区二区三区| 国产精品一区免费在线观看| 精品久久久久99| 日本伊人色综合网| 亚洲国产毛片aaaaa无费看 | 韩国一区二区视频| 91精品国产欧美一区二区| 日本sm残虐另类| 久久综合九色综合97婷婷 | 欧美精品日韩一区| 国产不卡视频在线观看| 成人18视频在线播放| 在线视频国产一区| 欧美一区二区女人| 婷婷成人综合网| 欧美日韩国产一区| 无码av免费一区二区三区试看| 在线欧美日韩国产| 亚洲一区二区在线视频| 91九色02白丝porn| 亚洲精品国产一区二区精华液| 色狠狠色噜噜噜综合网| 亚洲女人的天堂| 欧美专区亚洲专区| 亚洲国产精品一区二区www| 欧美日韩五月天| 午夜精品福利一区二区蜜股av | 亚洲国产中文字幕| 宅男噜噜噜66一区二区66| 蜜桃视频在线观看一区| 久久综合色天天久久综合图片| 国产成人免费在线视频| 中文字幕一区二区三区不卡在线 | 亚洲三级电影全部在线观看高清| 色国产综合视频| 五月激情丁香一区二区三区| 欧美www视频| 国产69精品一区二区亚洲孕妇| 
亚洲国产成人午夜在线一区| 在线这里只有精品| 青青草91视频| 国产精品亲子伦对白| 91免费在线视频观看| 午夜一区二区三区视频| 精品国产污污免费网站入口| 成人看片黄a免费看在线| 亚洲午夜国产一区99re久久| 精品少妇一区二区三区免费观看| 成人免费福利片| 日韩影院在线观看| 国产性做久久久久久| 色婷婷综合久久久久中文一区二区| 调教+趴+乳夹+国产+精品| 久久人人97超碰com| gogo大胆日本视频一区| 五月天网站亚洲| 欧美性三三影院| 国产精品伊人色| 亚洲精品欧美在线| av亚洲精华国产精华精| 无码av中文一区二区三区桃花岛| 亚洲另类中文字| 亚洲一区二区三区四区在线| 国产精品的网站| 国产调教视频一区| 亚洲国产另类精品专区| 亚洲免费资源在线播放| 一区二区三区国产豹纹内裤在线 | 日韩电影免费在线看| 成人免费视频在线观看| 中文字幕亚洲综合久久菠萝蜜| 1000部国产精品成人观看| 亚洲女同女同女同女同女同69| 人人狠狠综合久久亚洲| 日本不卡一区二区|