Move the IPv4 socket demultiplexing fields (daddr, rcv_saddr, dport, num,
saddr, sport) out of struct sock and into struct inet_opt.
Fix -EFAULT handling in the TCP direct user copy path.
Use struct initializers in IPv6 ndisc code.
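Every conversion below follows the same pattern: accesses to the old
sk->{daddr,rcv_saddr,dport,num,saddr,sport} fields become accesses to the
new struct inet_opt members through the inet_sk() accessor. A minimal
sketch of the idiom (assuming inet_sk() resolves to the socket's IPv4
protocol-private area, in the same style as the ec_sk() and pkt_sk()
accessors appearing below):

	struct inet_opt *inet = inet_sk(sk);	/* hoisted in hot paths */

	inet->num = protocol;			/* was: sk->num = protocol; */
	inet->sport = htons(inet->num);		/* was: sk->sport = htons(sk->num); */

Functions with several accesses cache the pointer in a local; one-off
accesses use inet_sk(sk)->field inline.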
sk->state = PPPOX_CONNECTED;
}
- sk->num = sp->sa_addr.pppoe.sid;
+ po->num = sp->sa_addr.pppoe.sid;
end:
release_sock(sk);
hdr.ver = 1;
hdr.type = 1;
hdr.code = 0;
- hdr.sid = sk->num;
+ hdr.sid = po->num;
lock_sock(sk);
hdr.ver = 1;
hdr.type = 1;
hdr.code = 0;
- hdr.sid = sk->num;
+ hdr.sid = po->num;
hdr.length = htons(skb->len);
if (!dev)
unsigned char port;
unsigned char station;
unsigned char net;
+ unsigned short num;
};
#define ec_sk(__sk) ((struct econet_opt *)(__sk)->protinfo)
union {
struct pppoe_opt pppoe;
} proto;
+ unsigned short num;
};
#define pppoe_dev proto.pppoe.dev
#define pppoe_pa proto.pppoe.pa
#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
struct inet_opt {
+ /* Socket demultiplex comparisons on incoming packets. */
+ __u32 daddr; /* Foreign IPv4 addr */
+ __u32 rcv_saddr; /* Bound local IPv4 addr */
+ __u16 dport; /* Destination port */
+ __u16 num; /* Local port */
+ __u32 saddr; /* Sending source */
int ttl; /* TTL setting */
int tos; /* TOS */
unsigned cmsg_flags;
struct ip_options *opt;
+ __u16 sport; /* Source port */
unsigned char hdrincl; /* Include headers ? */
__u8 mc_ttl; /* Multicasting TTL */
__u8 mc_loop; /* Loopback */
+ __u8 pmtudisc;
+ __u16 id; /* ID counter for DF pkts */
unsigned recverr : 1,
freebind : 1;
- __u16 id; /* ID counter for DF pkts */
- __u8 pmtudisc;
int mc_index; /* Multicast device index */
__u32 mc_addr;
struct ip_mc_socklist *mc_list; /* Group array */
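One convention that struct inet_opt inherits from struct sock, worth
restating before the conversions that rely on it: num holds the local port
in host byte order, while sport and dport stay in network byte order.
Hence the pairings seen throughout the patch:

	inet->sport = htons(inet->num);	/* autobind: host -> network order */
	sin->sin_port = inet->dport;	/* sockaddr wants network order */
	srcp = ntohs(inet->sport);	/* /proc output wants host order */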
* does not change, they drop every other packet in
* a TCP stream using header compression.
*/
- iph->id = (sk && sk->daddr) ? htons(inet_sk(sk)->id++) : 0;
+ iph->id = (sk && inet_sk(sk)->daddr) ?
+ htons(inet_sk(sk)->id++) : 0;
} else
__ip_select_ident(iph, dst);
}
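The id counter moved above travels with the socket: connected sockets seed
it (udp_connect below does inet->id = jiffies) and ip_select_ident() bumps
it once per DF packet. A condensed sketch of the function as it reads
after this hunk; the lines around the hunk are reconstructed, so treat the
exact DF test as approximate:

	if (iph->frag_off & htons(IP_DF))
		/* connected socket: per-socket counter; otherwise 0 */
		iph->id = (sk && inet_sk(sk)->daddr) ?
			  htons(inet_sk(sk)->id++) : 0;
	else
		__ip_select_ident(iph, dst);	/* peer-based fallback */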
} while(0);
struct sock {
- /* Socket demultiplex comparisons on incoming packets. */
- __u32 daddr; /* Foreign IPv4 addr */
- __u32 rcv_saddr; /* Bound local IPv4 addr */
- __u16 dport; /* Destination port */
- unsigned short num; /* Local port */
- int bound_dev_if; /* Bound device index if != 0 */
-
+ /* Start of struct sock/struct tcp_tw_bucket shared layout */
+ volatile unsigned char state, /* Connection state */
+ zapped; /* ax25 & ipx means !linked */
+ unsigned char reuse; /* SO_REUSEADDR setting */
+ unsigned char shutdown;
+ int bound_dev_if; /* Bound device index if != 0 */
/* Main hash linkage for various protocol lookup tables. */
struct sock *next;
struct sock **pprev;
struct sock *bind_next;
struct sock **bind_pprev;
-
- volatile unsigned char state, /* Connection state */
- zapped; /* In ax25 & ipx means not linked */
- __u16 sport; /* Source port */
-
- unsigned short family; /* Address family */
- unsigned char reuse; /* SO_REUSEADDR setting */
- unsigned char shutdown;
atomic_t refcnt; /* Reference count */
-
+ unsigned short family; /* Address family */
+ /* End of struct sock/struct tcp_tw_bucket shared layout */
+ unsigned char use_write_queue;
+ unsigned char userlocks;
socket_lock_t lock; /* Synchronizer... */
int rcvbuf; /* Size of receive buffer in bytes */
atomic_t omem_alloc; /* "o" is "option" or "other" */
int wmem_queued; /* Persistent queue size */
int forward_alloc; /* Space allocated forward. */
- __u32 saddr; /* Sending source */
unsigned int allocation; /* Allocation mode */
int sndbuf; /* Size of send buffer in bytes */
struct sock *prev;
bsdism;
unsigned char debug;
unsigned char rcvtstamp;
- unsigned char use_write_queue;
- unsigned char userlocks;
- /* Hole of 3 bytes. Try to pack. */
+ /* Hole of 1 byte. Try to pack. */
int route_caps;
int proc;
unsigned long lingertime;
* 2) If all sockets have sk->reuse set, and none of them are in
* TCP_LISTEN state, the port may be shared.
* Failing that, goto test 3.
- * 3) If all sockets are bound to a specific sk->rcv_saddr local
+ * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
* address, and none of them are the same, the port may be
* shared.
* Failing this, the port cannot be shared.
* XXX Yes I know this is gross, but I'd have to edit every single
* XXX networking file if I created a "struct sock_header". -DaveM
*/
- __u32 daddr;
- __u32 rcv_saddr;
- __u16 dport;
- unsigned short num;
+ volatile unsigned char state, /* Connection state */
+ substate; /* "zapped" -> "substate" */
+ unsigned char reuse; /* SO_REUSEADDR setting */
+ unsigned char rcv_wscale; /* also TW bucket specific */
int bound_dev_if;
+ /* Main hash linkage for various protocol lookup tables. */
struct sock *next;
struct sock **pprev;
struct sock *bind_next;
struct sock **bind_pprev;
- unsigned char state,
- substate; /* "zapped" is replaced with "substate" */
- __u16 sport;
- unsigned short family;
- unsigned char reuse,
- rcv_wscale; /* It is also TW bucket specific */
atomic_t refcnt;
-
+ unsigned short family;
+ /* End of struct sock/struct tcp_tw_bucket shared layout */
+ __u16 sport;
+ /* Socket demultiplex comparisons on incoming packets. */
+ /* these four, plus sport above, live in inet_opt */
+ __u32 daddr;
+ __u32 rcv_saddr;
+ __u16 dport;
+ __u16 num;
/* And these are ours. */
int hashent;
int timeout;
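The shared-layout markers in struct sock and struct tcp_tw_bucket are
load-bearing: TIME_WAIT buckets sit on the same bind and established hash
chains as full sockets, and lookup code casts a struct sock pointer to a
struct tcp_tw_bucket, so every member up to the End marker must keep an
identical offset in both structures. The TIME_WAIT lookup converted later
in this patch shows the idiom:

	struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

	if (*((__u32 *)&(tw->dport)) == ports && sk->family == PF_INET6) {
		/* only past this point may the tw-only members
		 * (v6_daddr, v6_rcv_saddr, ...) be trusted */
	}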
__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- (((*((__u64 *)&((__sk)->daddr)))== (__cookie)) && \
- ((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
+ (((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
+ ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
(!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- (((__sk)->daddr == (__saddr)) && \
- ((__sk)->rcv_saddr == (__daddr)) && \
- ((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
+ ((inet_sk(__sk)->daddr == (__saddr)) && \
+ (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
+ ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
(!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#endif /* 64-bit arch */
#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
- (((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
+ (((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
((__sk)->family == AF_INET6) && \
!ipv6_addr_cmp(&inet6_sk(__sk)->daddr, (__saddr)) && \
!ipv6_addr_cmp(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
{
- return tcp_lhashfn(sk->num);
+ return tcp_lhashfn(inet_sk(sk)->num);
}
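The 64-bit flavor of TCP_IPV4_MATCH leans on the field order chosen for
struct inet_opt above: daddr and rcv_saddr are adjacent 32-bit words read
as a single __u64, dport and num as a single __u32, so an established-hash
probe costs two compares plus the device check. Typical usage, after the
tcp_v4_check_established() hunk below (the match call itself is a sketch
of that loop body):

	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
	__u32 ports = TCP_COMBINED_PORTS(inet->dport, inet->num);

	if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
		goto not_unique;	/* full 4-tuple + device match */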
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define _UDP_H
#include <linux/udp.h>
+#include <linux/ip.h>
#include <net/sock.h>
#define UDP_HTABLE_SIZE 128
struct sock *sk = udp_hash[num & (UDP_HTABLE_SIZE - 1)];
for(; sk != NULL; sk = sk->next) {
- if(sk->num == num)
+ if (inet_sk(sk)->num == num)
return 1;
}
return 0;
memset(eo, 0, sizeof(*eo));
sk->zapped=0;
sk->family = PF_ECONET;
- sk->num = protocol;
+ eo->num = protocol;
sklist_insert_socket(&econet_sklist, sk);
return(0);
static int inet_autobind(struct sock *sk)
{
+ struct inet_opt *inet = inet_sk(sk);
/* We may need to bind the socket. */
lock_sock(sk);
- if (sk->num == 0) {
+ if (!inet->num) {
if (sk->prot->get_port(sk, 0) != 0) {
release_sock(sk);
return -EAGAIN;
}
- sk->sport = htons(sk->num);
+ inet->sport = htons(inet->num);
}
release_sock(sk);
return 0;
inet = inet_sk(sk);
if (SOCK_RAW == sock->type) {
- sk->num = protocol;
+ inet->num = protocol;
if (IPPROTO_RAW == protocol)
inet->hdrincl = 1;
}
atomic_inc(&inet_sock_nr);
#endif
- if (sk->num) {
+ if (inet->num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
* creation time automatically
* shares.
*/
- sk->sport = htons(sk->num);
+ inet->sport = htons(inet->num);
/* Add to protocol hash chains. */
sk->prot->hash(sk);
/* Check these errors (active socket, double bind). */
err = -EINVAL;
- if ((sk->state != TCP_CLOSE) ||
- (sk->num != 0))
+ if (sk->state != TCP_CLOSE || inet->num)
goto out;
- sk->rcv_saddr = sk->saddr = addr->sin_addr.s_addr;
+ inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
- sk->saddr = 0; /* Use device */
+ inet->saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */
if (sk->prot->get_port(sk, snum) != 0) {
- sk->saddr = sk->rcv_saddr = 0;
+ inet->saddr = inet->rcv_saddr = 0;
err = -EADDRINUSE;
goto out;
}
- if (sk->rcv_saddr)
+ if (inet->rcv_saddr)
sk->userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
sk->userlocks |= SOCK_BINDPORT_LOCK;
- sk->sport = htons(sk->num);
- sk->daddr = 0;
- sk->dport = 0;
+ inet->sport = htons(inet->num);
+ inet->daddr = 0;
+ inet->dport = 0;
sk_dst_reset(sk);
err = 0;
out:
if (uaddr->sa_family == AF_UNSPEC)
return sk->prot->disconnect(sk, flags);
- if (sk->num==0 && inet_autobind(sk) != 0)
+ if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
return sk->prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
}
int addr_len, int flags)
{
struct sock *sk=sock->sk;
+ struct inet_opt *inet = inet_sk(sk);
int err;
long timeo;
goto out;
err = -EAGAIN;
- if (sk->num == 0) {
+ if (!inet->num) {
if (sk->prot->get_port(sk, 0) != 0)
goto out;
- sk->sport = htons(sk->num);
+ inet->sport = htons(inet->num);
}
err = sk->prot->connect(sk, uaddr, addr_len);
int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
+ struct inet_opt *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
sin->sin_family = AF_INET;
if (peer) {
- if (!sk->dport)
+ if (!inet->dport)
return -ENOTCONN;
if (((1<<sk->state)&(TCPF_CLOSE|TCPF_SYN_SENT)) && peer == 1)
return -ENOTCONN;
- sin->sin_port = sk->dport;
- sin->sin_addr.s_addr = sk->daddr;
+ sin->sin_port = inet->dport;
+ sin->sin_addr.s_addr = inet->daddr;
} else {
- __u32 addr = sk->rcv_saddr;
+ __u32 addr = inet->rcv_saddr;
if (!addr)
- addr = sk->saddr;
- sin->sin_port = sk->sport;
+ addr = inet->saddr;
+ sin->sin_port = inet->sport;
sin->sin_addr.s_addr = addr;
}
*uaddr_len = sizeof(*sin);
struct sock *sk = sock->sk;
/* We may need to bind the socket. */
- if (sk->num==0 && inet_autobind(sk) != 0)
+ if (!inet_sk(sk)->num && inet_autobind(sk))
return -EAGAIN;
return sk->prot->sendmsg(sk, msg, size);
/* If socket is bound to an interface, only report
* the packet if it came from that interface.
*/
- if (sk && sk->num == protocol
+ if (sk && inet_sk(sk)->num == protocol
&& ((sk->bound_dev_if == 0)
|| (sk->bound_dev_if == skb->dev->ifindex))) {
if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
u32 daddr;
/* Use correct destination address if we have options. */
- daddr = sk->daddr;
+ daddr = inet->daddr;
if(opt && opt->srr)
daddr = opt->faddr;
* keep trying until route appears or the connection times itself
* out.
*/
- if (ip_route_output(&rt, daddr, sk->saddr,
+ if (ip_route_output(&rt, daddr, inet->saddr,
RT_CONN_FLAGS(sk),
sk->bound_dev_if))
goto no_route;
if(opt && opt->optlen) {
iph->ihl += opt->optlen >> 2;
- ip_options_build(skb, opt, sk->daddr, rt, 0);
+ ip_options_build(skb, opt, inet->daddr, rt, 0);
}
return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
}
if (length + fragheaderlen > 0xFFFF) {
- ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
+ ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
return -EMSGSIZE;
}
*/
if (offset > 0 && inet->pmtudisc == IP_PMTUDISC_DO) {
- ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, mtu);
+ ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
return ip_build_xmit_slow(sk,getfrag,frag,length,ipc,rt,flags);
} else {
if (length > rt->u.dst.dev->mtu) {
- ip_local_error(sk, EMSGSIZE, rt->rt_dst, sk->dport, rt->u.dst.dev->mtu);
+ ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
+ rt->u.dst.dev->mtu);
return -EMSGSIZE;
}
}
{
struct ip_ra_chain *ra, *new_ra, **rap;
- if (sk->type != SOCK_RAW || sk->num == IPPROTO_RAW)
+ if (sk->type != SOCK_RAW || inet_sk(sk)->num == IPPROTO_RAW)
return -EINVAL;
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->family == PF_INET ||
(!((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
- && sk->daddr != LOOPBACK4_IPV6)) {
+ && inet->daddr != LOOPBACK4_IPV6)) {
#endif
if (opt)
tp->ext_header_len = opt->optlen;
if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
struct in_pktinfo info;
- info.ipi_addr.s_addr = sk->rcv_saddr;
- info.ipi_spec_dst.s_addr = sk->rcv_saddr;
+ info.ipi_addr.s_addr = inet->rcv_saddr;
+ info.ipi_spec_dst.s_addr = inet->rcv_saddr;
info.ipi_ifindex = inet->mc_index;
put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
static int
getorigdst(struct sock *sk, int optval, void *user, int *len)
{
+ struct inet_opt *inet = inet_sk(sk);
struct ip_conntrack_tuple_hash *h;
- struct ip_conntrack_tuple tuple = { { sk->rcv_saddr, { sk->sport } },
- { sk->daddr, { sk->dport },
+ struct ip_conntrack_tuple tuple = { { inet->rcv_saddr,
+ { inet->sport } },
+ { inet->daddr,
+ { inet->dport },
IPPROTO_TCP } };
/* We only do TCP at the moment: is there a better way? */
static void raw_v4_hash(struct sock *sk)
{
- struct sock **skp = &raw_v4_htable[sk->num & (RAWV4_HTABLE_SIZE - 1)];
+ struct sock **skp = &raw_v4_htable[inet_sk(sk)->num &
+ (RAWV4_HTABLE_SIZE - 1)];
write_lock_bh(&raw_v4_lock);
if ((sk->next = *skp) != NULL)
struct sock *s = sk;
for (s = sk; s; s = s->next) {
- if (s->num == num &&
- !(s->daddr && s->daddr != raddr) &&
- !(s->rcv_saddr && s->rcv_saddr != laddr) &&
+ struct inet_opt *inet = inet_sk(s);
+
+ if (inet->num == num &&
+ !(inet->daddr && inet->daddr != raddr) &&
+ !(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
!(s->bound_dev_if && s->bound_dev_if != dif))
break; /* gotcha */
}
err = -EINVAL;
if (sk->state != TCP_ESTABLISHED)
goto out;
- daddr = sk->daddr;
+ daddr = inet->daddr;
}
- ipc.addr = sk->saddr;
+ ipc.addr = inet->saddr;
ipc.opt = NULL;
ipc.oif = sk->bound_dev_if;
/* This gets rid of all the nasties in af_inet. -DaveM */
static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
+ struct inet_opt *inet = inet_sk(sk);
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
int ret = -EINVAL;
int chk_addr_ret;
if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
- sk->rcv_saddr = sk->saddr = addr->sin_addr.s_addr;
+ inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
- sk->saddr = 0; /* Use device */
+ inet->saddr = 0; /* Use device */
sk_dst_reset(sk);
ret = 0;
out: return ret;
static int raw_init(struct sock *sk)
{
struct raw_opt *tp = raw4_sk(sk);
- if (sk->num == IPPROTO_ICMP)
+ if (inet_sk(sk)->num == IPPROTO_ICMP)
memset(&tp->filter, 0, sizeof(tp->filter));
return 0;
}
return ip_setsockopt(sk, level, optname, optval, optlen);
if (optname == ICMP_FILTER) {
- if (sk->num != IPPROTO_ICMP)
+ if (inet_sk(sk)->num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_seticmpfilter(sk, optval, optlen);
return ip_getsockopt(sk, level, optname, optval, optlen);
if (optname == ICMP_FILTER) {
- if (sk->num != IPPROTO_ICMP)
+ if (inet_sk(sk)->num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_geticmpfilter(sk, optval, optlen);
static void get_raw_sock(struct sock *sp, char *tmpbuf, int i)
{
- unsigned int dest = sp->daddr,
- src = sp->rcv_saddr;
+ struct inet_opt *inet = inet_sk(sp);
+ unsigned int dest = inet->daddr,
+ src = inet->rcv_saddr;
__u16 destp = 0,
- srcp = sp->num;
+ srcp = inet->num;
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
int tcp_listen_start(struct sock *sk)
{
+ struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt;
* after validation is complete.
*/
sk->state = TCP_LISTEN;
- if (sk->prot->get_port(sk, sk->num) == 0) {
- sk->sport = htons(sk->num);
+ if (!sk->prot->get_port(sk, inet->num)) {
+ inet->sport = htons(inet->num);
sk_dst_reset(sk);
sk->prot->hash(sk);
/* It cannot be in hash table! */
BUG_TRAP(sk->pprev==NULL);
- /* If it has not 0 sk->num, it must be bound */
- BUG_TRAP(!sk->num || sk->prev!=NULL);
+ /* If inet_sk(sk)->num is nonzero, the socket must be bound */
+ BUG_TRAP(!inet_sk(sk)->num || sk->prev);
#ifdef TCP_DEBUG
if (sk->zapped) {
int tcp_disconnect(struct sock *sk, int flags)
{
+ struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
int old_state;
int err = 0;
tcp_writequeue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
- sk->dport = 0;
+ inet->dport = 0;
if (!(sk->userlocks&SOCK_BINDADDR_LOCK)) {
- sk->rcv_saddr = 0;
- sk->saddr = 0;
+ inet->rcv_saddr = inet->saddr = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
tcp_sack_reset(tp);
__sk_dst_reset(sk);
- BUG_TRAP(!sk->num || sk->prev);
+ BUG_TRAP(!inet->num || sk->prev);
sk->error_report(sk);
return err;
static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
int ext, u32 pid, u32 seq)
{
+ struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
struct tcpdiagmsg *r;
struct nlmsghdr *nlh;
r->tcpdiag_timer = 0;
r->tcpdiag_retrans = 0;
- r->id.tcpdiag_sport = sk->sport;
- r->id.tcpdiag_dport = sk->dport;
- r->id.tcpdiag_src[0] = sk->rcv_saddr;
- r->id.tcpdiag_dst[0] = sk->daddr;
r->id.tcpdiag_if = sk->bound_dev_if;
*((struct sock **)&r->id.tcpdiag_cookie) = sk;
if (tmo < 0)
tmo = 0;
+ r->id.tcpdiag_sport = tw->sport;
+ r->id.tcpdiag_dport = tw->dport;
+ r->id.tcpdiag_src[0] = tw->rcv_saddr;
+ r->id.tcpdiag_dst[0] = tw->daddr;
r->tcpdiag_state = tw->substate;
r->tcpdiag_timer = 3;
r->tcpdiag_expires = (tmo*1000+HZ-1)/HZ;
return skb->len;
}
+ r->id.tcpdiag_sport = inet->sport;
+ r->id.tcpdiag_dport = inet->dport;
+ r->id.tcpdiag_src[0] = inet->rcv_saddr;
+ r->id.tcpdiag_dst[0] = inet->daddr;
+
#ifdef CONFIG_IPV6
if (r->tcpdiag_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
{
while (len > 0) {
int yes = 1;
+ struct inet_opt *inet = inet_sk(sk);
struct tcpdiag_bc_op *op = (struct tcpdiag_bc_op*)bc;
switch (op->code) {
yes = 0;
break;
case TCPDIAG_BC_S_GE:
- yes = (sk->num >= op[1].no);
+ yes = inet->num >= op[1].no;
break;
case TCPDIAG_BC_S_LE:
- yes = (sk->num <= op[1].no);
+ yes = inet->num <= op[1].no;
break;
case TCPDIAG_BC_D_GE:
- yes = (ntohs(sk->dport) >= op[1].no);
+ yes = ntohs(inet->dport) >= op[1].no;
break;
case TCPDIAG_BC_D_LE:
- yes = (ntohs(sk->dport) <= op[1].no);
+ yes = ntohs(inet->dport) <= op[1].no;
break;
case TCPDIAG_BC_AUTO:
yes = !(sk->userlocks&SOCK_BINDPORT_LOCK);
u32 *addr;
if (cond->port != -1 &&
- cond->port != (op->code == TCPDIAG_BC_S_COND ? sk->num : ntohs(sk->dport))) {
+ cond->port != (op->code == TCPDIAG_BC_S_COND ?
+ inet->num : ntohs(inet->dport))) {
yes = 0;
break;
}
#endif
{
if (op->code == TCPDIAG_BC_S_COND)
- addr = &sk->rcv_saddr;
+ addr = &inet->rcv_saddr;
else
- addr = &sk->daddr;
+ addr = &inet->daddr;
}
if (bitstring_match(addr, cond->addr, cond->prefix_len))
for (sk = tcp_listening_hash[i], num = 0;
sk != NULL;
sk = sk->next, num++) {
+ struct inet_opt *inet = inet_sk(sk);
if (num < s_num)
continue;
if (!(r->tcpdiag_states&TCPF_LISTEN) ||
r->id.tcpdiag_dport)
continue;
- if (r->id.tcpdiag_sport != sk->sport && r->id.tcpdiag_sport)
+ if (r->id.tcpdiag_sport != inet->sport &&
+ r->id.tcpdiag_sport)
continue;
if (bc && !tcpdiag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), sk))
continue;
for (sk = head->chain, num = 0;
sk != NULL;
sk = sk->next, num++) {
+ struct inet_opt *inet = inet_sk(sk);
+
if (num < s_num)
continue;
if (!(r->tcpdiag_states&(1<<sk->state)))
continue;
- if (r->id.tcpdiag_sport != sk->sport && r->id.tcpdiag_sport)
+ if (r->id.tcpdiag_sport != inet->sport &&
+ r->id.tcpdiag_sport)
continue;
- if (r->id.tcpdiag_dport != sk->dport && r->id.tcpdiag_dport)
+ if (r->id.tcpdiag_dport != inet->dport && r->id.tcpdiag_dport)
continue;
if (bc && !tcpdiag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), sk))
continue;
for (sk = tcp_ehash[i+tcp_ehash_size].chain;
sk != NULL;
sk = sk->next, num++) {
+ struct inet_opt *inet = inet_sk(sk);
+
if (num < s_num)
continue;
if (!(r->tcpdiag_states&(1<<sk->zapped)))
continue;
- if (r->id.tcpdiag_sport != sk->sport && r->id.tcpdiag_sport)
+ if (r->id.tcpdiag_sport != inet->sport &&
+ r->id.tcpdiag_sport)
continue;
- if (r->id.tcpdiag_dport != sk->dport && r->id.tcpdiag_dport)
+ if (r->id.tcpdiag_dport != inet->dport &&
+ r->id.tcpdiag_dport)
continue;
if (bc && !tcpdiag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), sk))
continue;
#if FASTRETRANS_DEBUG > 1
static void DBGUNDO(struct sock *sk, struct tcp_opt *tp, const char *msg)
{
+ struct inet_opt *inet = inet_sk(sk);
printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
msg,
- NIPQUAD(sk->daddr), ntohs(sk->dport),
+ NIPQUAD(inet->daddr), ntohs(inet->dport),
tp->snd_cwnd, tp->left_out,
tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out);
}
__set_current_state(TASK_RUNNING);
local_bh_enable();
- if (skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov,
- chunk)) {
- sk->err = EFAULT;
- sk->error_report(sk);
+ if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
+ tp->ucopy.len -= chunk;
+ tp->copied_seq += chunk;
+ eaten = (chunk == skb->len && !th->fin);
}
local_bh_disable();
- tp->ucopy.len -= chunk;
- tp->copied_seq += chunk;
- eaten = (chunk == skb->len && !th->fin);
}
if (eaten <= 0) {
tp->ucopy.iov);
if (!err) {
-update:
- tp->ucopy.len -= chunk;
+ tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
- local_bh_disable();
- return 0;
- }
-
- if (err == -EFAULT) {
- sk->err = EFAULT;
- sk->error_report(sk);
- goto update;
}
local_bh_disable();
tp->copied_seq == tp->rcv_nxt &&
len - tcp_header_len <= tp->ucopy.len &&
sk->lock.users) {
- eaten = 1;
-
- NET_INC_STATS_BH(TCPHPHitsToUser);
-
__set_current_state(TASK_RUNNING);
- if (tcp_copy_to_iovec(sk, skb, tcp_header_len))
- goto csum_error;
-
- __skb_pull(skb,tcp_header_len);
-
- tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
- } else {
+ if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
+ __skb_pull(skb, tcp_header_len);
+ tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+ NET_INC_STATS_BH(TCPHPHitsToUser);
+ eaten = 1;
+ }
+ }
+ if (!eaten) {
if (tcp_checksum_complete_user(sk, skb))
goto csum_error;
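The net effect of the three receive-path hunks above: a failed copy into
the user's iovec no longer sets sk->err = EFAULT and fires
sk->error_report(); the segment is simply left unconsumed and takes the
ordinary receive-queue path, so the fault is reported synchronously by the
caller's own copy attempt. Schematically:

	if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
		/* success: account the bytes to the user copy */
		tp->ucopy.len -= chunk;
		tp->copied_seq += chunk;
		eaten = (chunk == skb->len && !th->fin);
	}
	/* failure: eaten stays 0 and the skb is queued as usual */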
static __inline__ int tcp_sk_hashfn(struct sock *sk)
{
- __u32 laddr = sk->rcv_saddr;
- __u16 lport = sk->num;
- __u32 faddr = sk->daddr;
- __u16 fport = sk->dport;
+ struct inet_opt *inet = inet_sk(sk);
+ __u32 laddr = inet->rcv_saddr;
+ __u16 lport = inet->num;
+ __u32 faddr = inet->daddr;
+ __u16 fport = inet->dport;
return tcp_hashfn(laddr, lport, faddr, fport);
}
/* Caller must disable local BH processing. */
static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
{
- struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(child->num)];
+ struct tcp_bind_hashbucket *head =
+ &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
struct tcp_bind_bucket *tb;
spin_lock(&head->lock);
static inline void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb, unsigned short snum)
{
- sk->num = snum;
+ inet_sk(sk)->num = snum;
if ((sk->bind_next = tb->owners) != NULL)
tb->owners->bind_pprev = &sk->bind_next;
tb->owners = sk;
static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
{
+ struct inet_opt *inet = inet_sk(sk);
struct sock *sk2 = tb->owners;
int sk_reuse = sk->reuse;
if (!sk_reuse ||
!sk2->reuse ||
sk2->state == TCP_LISTEN) {
- if (!sk2->rcv_saddr ||
- !sk->rcv_saddr ||
- (sk2->rcv_saddr == sk->rcv_saddr))
+ struct inet_opt *inet2 = inet_sk(sk2);
+ if (!inet2->rcv_saddr ||
+ !inet->rcv_saddr ||
+ (inet2->rcv_saddr == inet->rcv_saddr))
break;
}
}
*/
__inline__ void __tcp_put_port(struct sock *sk)
{
- struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(sk->num)];
+ struct inet_opt *inet = inet_sk(sk);
+ struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
struct tcp_bind_bucket *tb;
spin_lock(&head->lock);
sk->bind_next->bind_pprev = sk->bind_pprev;
*(sk->bind_pprev) = sk->bind_next;
sk->prev = NULL;
- sk->num = 0;
+ inet->num = 0;
if (tb->owners == NULL) {
if (tb->next)
tb->next->pprev = tb->pprev;
hiscore=0;
for(; sk; sk = sk->next) {
- if(sk->num == hnum) {
- __u32 rcv_saddr = sk->rcv_saddr;
+ struct inet_opt *inet = inet_sk(sk);
+
+ if(inet->num == hnum) {
+ __u32 rcv_saddr = inet->rcv_saddr;
score = 1;
if(rcv_saddr) {
read_lock(&tcp_lhash_lock);
sk = tcp_listening_hash[tcp_lhashfn(hnum)];
if (sk) {
- if (sk->num == hnum &&
+ struct inet_opt *inet = inet_sk(sk);
+
+ if (inet->num == hnum &&
sk->next == NULL &&
- (!sk->rcv_saddr || sk->rcv_saddr == daddr) &&
+ (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
!sk->bound_dev_if)
goto sherry_cache;
sk = __tcp_v4_lookup_listener(sk, daddr, hnum, dif);
static int tcp_v4_check_established(struct sock *sk)
{
- u32 daddr = sk->rcv_saddr;
- u32 saddr = sk->daddr;
+ struct inet_opt *inet = inet_sk(sk);
+ u32 daddr = inet->rcv_saddr;
+ u32 saddr = inet->daddr;
int dif = sk->bound_dev_if;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
- __u32 ports = TCP_COMBINED_PORTS(sk->dport, sk->num);
- int hash = tcp_hashfn(daddr, sk->num, saddr, sk->dport);
+ __u32 ports = TCP_COMBINED_PORTS(inet->dport, inet->num);
+ int hash = tcp_hashfn(daddr, inet->num, saddr, inet->dport);
struct tcp_ehash_bucket *head = &tcp_ehash[hash];
struct sock *sk2, **skp;
struct tcp_tw_bucket *tw;
int tcp_v4_hash_connecting(struct sock *sk)
{
- unsigned short snum = sk->num;
+ unsigned short snum = inet_sk(sk)->num;
struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(snum)];
struct tcp_bind_bucket *tb = (struct tcp_bind_bucket *)sk->prev;
nexthop = inet->opt->faddr;
}
- tmp = ip_route_connect(&rt, nexthop, sk->saddr,
+ tmp = ip_route_connect(&rt, nexthop, inet->saddr,
RT_CONN_FLAGS(sk), sk->bound_dev_if);
if (tmp < 0)
return tmp;
if (buff == NULL)
goto failure;
- if (!sk->saddr)
- sk->saddr = rt->rt_src;
- sk->rcv_saddr = sk->saddr;
+ if (!inet->saddr)
+ inet->saddr = rt->rt_src;
+ inet->rcv_saddr = inet->saddr;
- if (tp->ts_recent_stamp && sk->daddr != daddr) {
+ if (tp->ts_recent_stamp && inet->daddr != daddr) {
/* Reset inherited state */
tp->ts_recent = 0;
tp->ts_recent_stamp = 0;
}
}
- sk->dport = usin->sin_port;
- sk->daddr = daddr;
+ inet->dport = usin->sin_port;
+ inet->daddr = daddr;
if (!tp->write_seq)
- tp->write_seq = secure_tcp_sequence_number(sk->saddr, sk->daddr,
- sk->sport,
+ tp->write_seq = secure_tcp_sequence_number(inet->saddr,
+ inet->daddr,
+ inet->sport,
usin->sin_port);
tp->ext_header_len = 0;
failure:
__sk_dst_reset(sk);
sk->route_caps = 0;
- sk->dport = 0;
+ inet->dport = 0;
return err;
}
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb)
{
+ struct inet_opt *inet = inet_sk(sk);
+
if (skb->ip_summed == CHECKSUM_HW) {
- th->check = ~tcp_v4_check(th, len, sk->saddr, sk->daddr, 0);
+ th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
skb->csum = offsetof(struct tcphdr, check);
} else {
- th->check = tcp_v4_check(th, len, sk->saddr, sk->daddr,
+ th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
csum_partial((char *)th, th->doff<<2, skb->csum));
}
}
newsk->route_caps = dst->dev->features;
newtp = tcp_sk(newsk);
- newsk->daddr = req->af.v4_req.rmt_addr;
- newsk->saddr = req->af.v4_req.loc_addr;
- newsk->rcv_saddr = req->af.v4_req.loc_addr;
newinet = inet_sk(newsk);
+ newinet->daddr = req->af.v4_req.rmt_addr;
+ newinet->rcv_saddr = req->af.v4_req.loc_addr;
+ newinet->saddr = req->af.v4_req.loc_addr;
newinet->opt = req->af.v4_req.opt;
req->af.v4_req.opt = NULL;
newinet->mc_index = tcp_v4_iif(skb);
struct inet_opt *inet = inet_sk(sk);
int err;
struct rtable *rt;
- __u32 old_saddr = sk->saddr;
+ __u32 old_saddr = inet->saddr;
__u32 new_saddr;
- __u32 daddr = sk->daddr;
+ __u32 daddr = inet->daddr;
if (inet->opt && inet->opt->srr)
daddr = inet->opt->faddr;
return 0;
if (sysctl_ip_dynaddr > 1) {
- printk(KERN_INFO "tcp_v4_rebuild_header(): shifting sk->saddr "
- "from %d.%d.%d.%d to %d.%d.%d.%d\n",
+ printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
+ "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
NIPQUAD(old_saddr),
NIPQUAD(new_saddr));
}
- sk->saddr = new_saddr;
- sk->rcv_saddr = new_saddr;
+ inet->saddr = new_saddr;
+ inet->rcv_saddr = new_saddr;
/* XXX The only one ugly spot where we need to
* XXX really change the sockets identity after
return 0;
/* Reroute. */
- daddr = sk->daddr;
+ daddr = inet->daddr;
if (inet->opt && inet->opt->srr)
daddr = inet->opt->faddr;
- err = ip_route_output(&rt, daddr, sk->saddr,
+ err = ip_route_output(&rt, daddr, inet->saddr,
RT_CONN_FLAGS(sk), sk->bound_dev_if);
if (!err) {
__sk_dst_set(sk, &rt->u.dst);
static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
{
struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
+ struct inet_opt *inet = inet_sk(sk);
sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = sk->daddr;
- sin->sin_port = sk->dport;
+ sin->sin_addr.s_addr = inet->daddr;
+ sin->sin_port = inet->dport;
}
/* VJ's idea. Save last timestamp seen from this destination
int tcp_v4_remember_stamp(struct sock *sk)
{
+ struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
struct rtable *rt = (struct rtable*)__sk_dst_get(sk);
struct inet_peer *peer = NULL;
int release_it = 0;
- if (rt == NULL || rt->rt_dst != sk->daddr) {
- peer = inet_getpeer(sk->daddr, 1);
+ if (rt == NULL || rt->rt_dst != inet->daddr) {
+ peer = inet_getpeer(inet->daddr, 1);
release_it = 1;
} else {
if (rt->peer == NULL)
" %02X %08X:%08X %02X:%08X %08X %5d %8d %u %d %p",
i,
req->af.v4_req.loc_addr,
- ntohs(sk->sport),
+ ntohs(inet_sk(sk)->sport),
req->af.v4_req.rmt_addr,
ntohs(req->rmt_port),
TCP_SYN_RECV,
int timer_active;
unsigned long timer_expires;
struct tcp_opt *tp = tcp_sk(sp);
+ struct inet_opt *inet = inet_sk(sp);
- dest = sp->daddr;
- src = sp->rcv_saddr;
- destp = ntohs(sp->dport);
- srcp = ntohs(sp->sport);
+ dest = inet->daddr;
+ src = inet->rcv_saddr;
+ destp = ntohs(inet->dport);
+ srcp = ntohs(inet->sport);
if (tp->pending == TCP_TIME_RETRANS) {
timer_active = 1;
timer_expires = tp->timeout;
write_unlock(&ehead->lock);
/* Step 3: Put TW into bind hash. Original socket stays there too.
- Note, that any socket with sk->num!=0 MUST be bound in binding
- cache, even if it is closed.
+ Note that any socket with inet_sk(sk)->num != 0 MUST be bound in
+ the binding cache, even if it is closed.
*/
- bhead = &tcp_bhash[tcp_bhashfn(sk->num)];
+ bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
spin_lock(&bhead->lock);
tw->tb = (struct tcp_bind_bucket *)sk->prev;
BUG_TRAP(sk->prev!=NULL);
tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
if(tw != NULL) {
+ struct inet_opt *inet = inet_sk(sk);
int rto = (tp->rto<<2) - (tp->rto>>1);
/* Give us an identity. */
- tw->daddr = sk->daddr;
- tw->rcv_saddr = sk->rcv_saddr;
+ tw->daddr = inet->daddr;
+ tw->rcv_saddr = inet->rcv_saddr;
tw->bound_dev_if= sk->bound_dev_if;
- tw->num = sk->num;
+ tw->num = inet->num;
tw->state = TCP_TIME_WAIT;
tw->substate = state;
- tw->sport = sk->sport;
- tw->dport = sk->dport;
+ tw->sport = inet->sport;
+ tw->dport = inet->dport;
tw->family = sk->family;
tw->reuse = sk->reuse;
tw->rcv_wscale = tp->rcv_wscale;
newsk->prev = NULL;
/* Clone the TCP header template */
- newsk->dport = req->rmt_port;
+ inet_sk(newsk)->dport = req->rmt_port;
sock_lock_init(newsk);
bh_lock_sock(newsk);
int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
if(skb != NULL) {
+ struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
int tcp_header_size = tp->tcp_header_len;
skb_set_owner_w(skb, sk);
/* Build TCP header and checksum it. */
- th->source = sk->sport;
- th->dest = sk->dport;
+ th->source = inet->sport;
+ th->dest = inet->dport;
th->seq = htonl(tcb->seq);
th->ack_seq = htonl(tp->rcv_nxt);
*(((__u16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->flags);
th->syn = 1;
th->ack = 1;
TCP_ECN_make_synack(req, th);
- th->source = sk->sport;
+ th->source = inet_sk(sk)->sport;
th->dest = req->rmt_port;
TCP_SKB_CB(skb)->seq = req->snt_isn;
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
* we cannot allow such beasts to hang infinitely.
*/
#ifdef TCP_DEBUG
- if (net_ratelimit())
+ if (net_ratelimit()) {
+ struct inet_opt *inet = inet_sk(sk);
printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n",
- NIPQUAD(sk->daddr), htons(sk->dport), sk->num,
- tp->snd_una, tp->snd_nxt);
+ NIPQUAD(inet->daddr), htons(inet->dport),
+ inet->num, tp->snd_una, tp->snd_nxt);
+ }
#endif
if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
tcp_write_err(sk);
static int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
+ struct inet_opt *inet = inet_sk(sk);
+
write_lock_bh(&udp_hash_lock);
if (snum == 0) {
int best_size_so_far, best, result, i;
best_size_so_far = 32767;
best = result = udp_port_rover;
for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
- struct sock *sk;
+ struct sock *sk2;
int size;
- sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
- if (!sk) {
+ sk2 = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
+ if (!sk2) {
if (result > sysctl_local_port_range[1])
result = sysctl_local_port_range[0] +
((result - sysctl_local_port_range[0]) &
do {
if (++size >= best_size_so_far)
goto next;
- } while ((sk = sk->next) != NULL);
+ } while ((sk2 = sk2->next) != NULL);
best_size_so_far = size;
best = result;
next:;
for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
sk2 != NULL;
sk2 = sk2->next) {
- if (sk2->num == snum &&
+ struct inet_opt *inet2 = inet_sk(sk2);
+
+ if (inet2->num == snum &&
sk2 != sk &&
sk2->bound_dev_if == sk->bound_dev_if &&
- (!sk2->rcv_saddr ||
- !sk->rcv_saddr ||
- sk2->rcv_saddr == sk->rcv_saddr) &&
+ (!inet2->rcv_saddr ||
+ !inet->rcv_saddr ||
+ inet2->rcv_saddr == inet->rcv_saddr) &&
(!sk2->reuse || !sk->reuse))
goto fail;
}
}
- sk->num = snum;
+ inet->num = snum;
if (sk->pprev == NULL) {
struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
if ((sk->next = *skp) != NULL)
sk->next->pprev = sk->pprev;
*sk->pprev = sk->next;
sk->pprev = NULL;
- sk->num = 0;
+ inet_sk(sk)->num = 0;
sock_prot_dec_use(sk->prot);
__sock_put(sk);
}
int badness = -1;
for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
- if(sk->num == hnum) {
+ struct inet_opt *inet = inet_sk(sk);
+
+ if (inet->num == hnum) {
int score = 0;
- if(sk->rcv_saddr) {
- if(sk->rcv_saddr != daddr)
+ if (inet->rcv_saddr) {
+ if (inet->rcv_saddr != daddr)
continue;
score++;
}
- if(sk->daddr) {
- if(sk->daddr != saddr)
+ if (inet->daddr) {
+ if (inet->daddr != saddr)
continue;
score++;
}
- if(sk->dport) {
- if(sk->dport != sport)
+ if (inet->dport) {
+ if (inet->dport != sport)
continue;
score++;
}
struct sock *s = sk;
unsigned short hnum = ntohs(loc_port);
for(; s; s = s->next) {
- if ((s->num != hnum) ||
- (s->daddr && s->daddr!=rmt_addr) ||
- (s->dport != rmt_port && s->dport != 0) ||
- (s->rcv_saddr && s->rcv_saddr != loc_addr) ||
+ struct inet_opt *inet = inet_sk(s);
+
+ if (inet->num != hnum ||
+ (inet->daddr && inet->daddr != rmt_addr) ||
+ (inet->dport != rmt_port && inet->dport) ||
+ (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
(s->bound_dev_if && s->bound_dev_if != dif))
continue;
break;
} else {
if (sk->state != TCP_ESTABLISHED)
return -ENOTCONN;
- ufh.daddr = sk->daddr;
- ufh.uh.dest = sk->dport;
+ ufh.daddr = inet->daddr;
+ ufh.uh.dest = inet->dport;
/* Open fast path for connected socket.
Route will not be used, if at least one option is set.
*/
connected = 1;
}
- ipc.addr = sk->saddr;
- ufh.uh.source = sk->sport;
+ ipc.addr = inet->saddr;
+ ufh.uh.source = inet->sport;
ipc.opt = NULL;
ipc.oif = sk->bound_dev_if;
sk_dst_reset(sk);
- err = ip_route_connect(&rt, usin->sin_addr.s_addr, sk->saddr,
+ err = ip_route_connect(&rt, usin->sin_addr.s_addr, inet->saddr,
RT_CONN_FLAGS(sk), sk->bound_dev_if);
if (err)
return err;
ip_rt_put(rt);
return -EACCES;
}
- if(!sk->saddr)
- sk->saddr = rt->rt_src; /* Update source address */
- if(!sk->rcv_saddr)
- sk->rcv_saddr = rt->rt_src;
- sk->daddr = rt->rt_dst;
- sk->dport = usin->sin_port;
+ if (!inet->saddr)
+ inet->saddr = rt->rt_src; /* Update source address */
+ if (!inet->rcv_saddr)
+ inet->rcv_saddr = rt->rt_src;
+ inet->daddr = rt->rt_dst;
+ inet->dport = usin->sin_port;
sk->state = TCP_ESTABLISHED;
inet->id = jiffies;
int udp_disconnect(struct sock *sk, int flags)
{
+ struct inet_opt *inet = inet_sk(sk);
/*
* 1003.1g - break association.
*/
sk->state = TCP_CLOSE;
- sk->daddr = 0;
- sk->dport = 0;
+ inet->daddr = 0;
+ inet->dport = 0;
sk->bound_dev_if = 0;
if (!(sk->userlocks&SOCK_BINDADDR_LOCK)) {
- sk->rcv_saddr = 0;
- sk->saddr = 0;
+ inet->rcv_saddr = inet->saddr = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
}
if (!(sk->userlocks&SOCK_BINDPORT_LOCK)) {
sk->prot->unhash(sk);
- sk->sport = 0;
+ inet->sport = 0;
}
sk_dst_reset(sk);
return 0;
static void get_udp_sock(struct sock *sp, char *tmpbuf, int i)
{
+ struct inet_opt *inet = inet_sk(sp);
unsigned int dest, src;
__u16 destp, srcp;
- dest = sp->daddr;
- src = sp->rcv_saddr;
- destp = ntohs(sp->dport);
- srcp = ntohs(sp->sport);
+ dest = inet->daddr;
+ src = inet->rcv_saddr;
+ destp = ntohs(inet->dport);
+ srcp = ntohs(inet->sport);
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
i, src, srcp, dest, destp, sp->state,
inet = inet_sk(sk);
if (SOCK_RAW == sock->type) {
- sk->num = protocol;
+ inet->num = protocol;
if (IPPROTO_RAW == protocol)
inet->hdrincl = 1;
}
#endif
MOD_INC_USE_COUNT;
- if (sk->num) {
+ if (inet->num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
* creation time automatically shares.
*/
- sk->sport = ntohs(sk->num);
+ inet->sport = ntohs(inet->num);
sk->prot->hash(sk);
}
if (sk->prot->init) {
{
struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr;
struct sock *sk = sock->sk;
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
__u32 v4addr = 0;
unsigned short snum;
lock_sock(sk);
/* Check these errors (active socket, double bind). */
- if ((sk->state != TCP_CLOSE) ||
- (sk->num != 0)) {
+ if (sk->state != TCP_CLOSE || inet->num) {
release_sock(sk);
return -EINVAL;
}
}
}
- sk->rcv_saddr = v4addr;
- sk->saddr = v4addr;
+ inet->rcv_saddr = v4addr;
+ inet->saddr = v4addr;
ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
/* Make sure we are allowed to bind here. */
if (sk->prot->get_port(sk, snum) != 0) {
- sk->rcv_saddr = 0;
- sk->saddr = 0;
+ inet->rcv_saddr = inet->saddr = 0;
memset(&np->rcv_saddr, 0, sizeof(struct in6_addr));
memset(&np->saddr, 0, sizeof(struct in6_addr));
sk->userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
sk->userlocks |= SOCK_BINDPORT_LOCK;
- sk->sport = ntohs(sk->num);
- sk->dport = 0;
- sk->daddr = 0;
+ inet->sport = ntohs(inet->num);
+ inet->dport = 0;
+ inet->daddr = 0;
release_sock(sk);
return 0;
{
struct sockaddr_in6 *sin=(struct sockaddr_in6 *)uaddr;
struct sock *sk = sock->sk;
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
sin->sin6_family = AF_INET6;
sin->sin6_flowinfo = 0;
sin->sin6_scope_id = 0;
if (peer) {
- if (!sk->dport)
+ if (!inet->dport)
return -ENOTCONN;
if (((1<<sk->state)&(TCPF_CLOSE|TCPF_SYN_SENT)) && peer == 1)
return -ENOTCONN;
- sin->sin6_port = sk->dport;
+ sin->sin6_port = inet->dport;
memcpy(&sin->sin6_addr, &np->daddr, sizeof(struct in6_addr));
if (np->sndflow)
sin->sin6_flowinfo = np->flow_label;
memcpy(&sin->sin6_addr, &np->rcv_saddr,
sizeof(struct in6_addr));
- sin->sin6_port = sk->sport;
+ sin->sin6_port = inet->sport;
}
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin->sin6_scope_id = sk->bound_dev_if;
struct ip6_ra_chain *ra, *new_ra, **rap;
/* RA packet may be delivered ONLY to IPPROTO_RAW socket */
- if (sk->type != SOCK_RAW || sk->num != IPPROTO_RAW)
+ if (sk->type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW)
return -EINVAL;
new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
if (opt) {
struct tcp_opt *tp = tcp_sk(sk);
if (!((1<<sk->state)&(TCPF_LISTEN|TCPF_CLOSE))
- && sk->daddr != LOOPBACK4_IPV6) {
+ && inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
tp->ext_header_len = opt->opt_flen + opt->opt_nflen;
tcp_sync_mss(sk, tp->pmtu_cookie);
}
static void pndisc_destructor(struct pneigh_entry *n);
static void pndisc_redo(struct sk_buff *skb);
-static struct neigh_ops ndisc_generic_ops =
-{
- AF_INET6,
- NULL,
- ndisc_solicit,
- ndisc_error_report,
- neigh_resolve_output,
- neigh_connected_output,
- dev_queue_xmit,
- dev_queue_xmit
+static struct neigh_ops ndisc_generic_ops = {
+ family: AF_INET6,
+ solicit: ndisc_solicit,
+ error_report: ndisc_error_report,
+ output: neigh_resolve_output,
+ connected_output: neigh_connected_output,
+ hh_output: dev_queue_xmit,
+ queue_xmit: dev_queue_xmit,
};
-static struct neigh_ops ndisc_hh_ops =
-{
- AF_INET6,
- NULL,
- ndisc_solicit,
- ndisc_error_report,
- neigh_resolve_output,
- neigh_resolve_output,
- dev_queue_xmit,
- dev_queue_xmit
+static struct neigh_ops ndisc_hh_ops = {
+ family: AF_INET6,
+ solicit: ndisc_solicit,
+ error_report: ndisc_error_report,
+ output: neigh_resolve_output,
+ connected_output: neigh_resolve_output,
+ hh_output: dev_queue_xmit,
+ queue_xmit: dev_queue_xmit,
};
-static struct neigh_ops ndisc_direct_ops =
-{
- AF_INET6,
- NULL,
- NULL,
- NULL,
- dev_queue_xmit,
- dev_queue_xmit,
- dev_queue_xmit,
- dev_queue_xmit
+static struct neigh_ops ndisc_direct_ops = {
+ family: AF_INET6,
+ output: dev_queue_xmit,
+ connected_output: dev_queue_xmit,
+ hh_output: dev_queue_xmit,
+ queue_xmit: dev_queue_xmit,
};
-struct neigh_table nd_tbl =
-{
- NULL,
- AF_INET6,
- sizeof(struct neighbour) + sizeof(struct in6_addr),
- sizeof(struct in6_addr),
- ndisc_hash,
- ndisc_constructor,
- pndisc_constructor,
- pndisc_destructor,
- pndisc_redo,
- "ndisc_cache",
- { NULL, NULL, &nd_tbl, 0, NULL, NULL,
- 30*HZ, 1*HZ, 60*HZ, 30*HZ, 5*HZ, 3, 3, 0, 3, 1*HZ, (8*HZ)/10, 64, 0 },
- 30*HZ, 128, 512, 1024,
+struct neigh_table nd_tbl = {
+ family: AF_INET6,
+ entry_size: sizeof(struct neighbour) + sizeof(struct in6_addr),
+ key_len: sizeof(struct in6_addr),
+ hash: ndisc_hash,
+ constructor: ndisc_constructor,
+ pconstructor: pndisc_constructor,
+ pdestructor: pndisc_destructor,
+ proxy_redo: pndisc_redo,
+ id: "ndisc_cache",
+ parms: {
+ tbl: &nd_tbl,
+ base_reachable_time: 30 * HZ,
+ retrans_time: 1 * HZ,
+ gc_staletime: 60 * HZ,
+ reachable_time: 30 * HZ,
+ delay_probe_time: 5 * HZ,
+ queue_len: 3,
+ ucast_probes: 3,
+ mcast_probes: 3,
+ anycast_delay: 1 * HZ,
+ proxy_delay: (8 * HZ) / 10,
+ proxy_qlen: 64,
+ },
+ gc_interval: 30 * HZ,
+ gc_thresh1: 128,
+ gc_thresh2: 512,
+ gc_thresh3: 1024,
};
#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
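The ndisc conversion trades positional initializers for GNU labeled
(field:) initializers: members left unnamed are zero-initialized, which is
why the explicit NULL slots disappear and why the tables survive future
struct neigh_ops/neigh_table growth untouched. For comparison, the
smallest of the tables in C99 designated-initializer spelling (a sketch;
the tree at this point uses the field: form):

	static struct neigh_ops ndisc_direct_ops = {
		.family           = AF_INET6,
		.output           = dev_queue_xmit,
		.connected_output = dev_queue_xmit,
		.hh_output        = dev_queue_xmit,
		.queue_xmit       = dev_queue_xmit,
	};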
static void raw_v6_hash(struct sock *sk)
{
- struct sock **skp = &raw_v6_htable[sk->num & (RAWV6_HTABLE_SIZE - 1)];
+ struct sock **skp = &raw_v6_htable[inet_sk(sk)->num &
+ (RAWV6_HTABLE_SIZE - 1)];
write_lock_bh(&raw_v6_lock);
if ((sk->next = *skp) != NULL)
int addr_type = ipv6_addr_type(loc_addr);
for(s = sk; s; s = s->next) {
- if(s->num == num) {
+ if (inet_sk(s)->num == num) {
struct ipv6_pinfo *np = inet6_sk(s);
if (!ipv6_addr_any(&np->daddr) &&
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
__u32 v4addr = 0;
}
}
- sk->rcv_saddr = v4addr;
- sk->saddr = v4addr;
+ inet->rcv_saddr = inet->saddr = v4addr;
ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
if (!(addr_type & IPV6_ADDR_MULTICAST))
ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
{
struct ipv6_txoptions opt_space;
struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
proto = ntohs(sin6->sin6_port);
if (!proto)
- proto = sk->num;
+ proto = inet->num;
if (proto > 255)
return(-EINVAL);
if (sk->state != TCP_ESTABLISHED)
return(-EINVAL);
- proto = sk->num;
+ proto = inet->num;
daddr = &np->daddr;
fl.fl6_flowlabel = np->flow_label;
}
break;
case SOL_ICMPV6:
- if (sk->num != IPPROTO_ICMPV6)
+ if (inet_sk(sk)->num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval,
optlen);
break;
case SOL_ICMPV6:
- if (sk->num != IPPROTO_ICMPV6)
+ if (inet_sk(sk)->num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval,
optlen);
static void rawv6_close(struct sock *sk, long timeout)
{
- if (sk->num == IPPROTO_RAW)
+ if (inet_sk(sk)->num == IPPROTO_RAW)
ip6_ra_control(sk, -1, NULL);
inet_sock_release(sk);
dest = &np->daddr;
src = &np->rcv_saddr;
destp = 0;
- srcp = sp->num;
+ srcp = inet_sk(sp)->num;
sprintf(tmpbuf,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
{
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *laddr = &np->rcv_saddr;
struct in6_addr *faddr = &np->daddr;
- __u16 lport = sk->num;
- __u16 fport = sk->dport;
+ __u16 lport = inet->num;
+ __u16 fport = inet->dport;
return tcp_v6_hashfn(laddr, lport, faddr, fport);
}
!sk2->reuse ||
sk2->state == TCP_LISTEN) {
/* NOTE: IPv6 tw bucket have different format */
- if (!sk2->rcv_saddr ||
+ if (!inet_sk(sk2)->rcv_saddr ||
addr_type == IPV6_ADDR_ANY ||
!ipv6_addr_cmp(&np->rcv_saddr,
sk2->state != TCP_TIME_WAIT ?
&np2->rcv_saddr :
&((struct tcp_tw_bucket*)sk)->v6_rcv_saddr) ||
(addr_type==IPV6_ADDR_MAPPED && sk2->family==AF_INET &&
- sk->rcv_saddr==sk2->rcv_saddr))
+ inet_sk(sk)->rcv_saddr ==
+ inet_sk(sk2)->rcv_saddr))
break;
}
}
tb->fastreuse = 0;
success:
- sk->num = snum;
+ inet_sk(sk)->num = snum;
if (sk->prev == NULL) {
if ((sk->bind_next = tb->owners) != NULL)
tb->owners->bind_pprev = &sk->bind_next;
read_lock(&tcp_lhash_lock);
sk = tcp_listening_hash[tcp_lhashfn(hnum)];
for(; sk; sk = sk->next) {
- if((sk->num == hnum) && (sk->family == PF_INET6)) {
+ if (inet_sk(sk)->num == hnum && sk->family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
score = 1;
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
for(sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next) {
- if(*((__u32 *)&(sk->dport)) == ports &&
+ /* FIXME: acme: check this... */
+ struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
+
+ if(*((__u32 *)&(tw->dport)) == ports &&
sk->family == PF_INET6) {
- struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
if(!ipv6_addr_cmp(&tw->v6_daddr, saddr) &&
!ipv6_addr_cmp(&tw->v6_rcv_saddr, daddr) &&
(!sk->bound_dev_if || sk->bound_dev_if == dif))
static int tcp_v6_check_established(struct sock *sk)
{
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *daddr = &np->rcv_saddr;
struct in6_addr *saddr = &np->daddr;
int dif = sk->bound_dev_if;
- u32 ports = TCP_COMBINED_PORTS(sk->dport, sk->num);
- int hash = tcp_v6_hashfn(daddr, sk->num, saddr, sk->dport);
+ u32 ports = TCP_COMBINED_PORTS(inet->dport, inet->num);
+ int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
struct tcp_ehash_bucket *head = &tcp_ehash[hash];
struct sock *sk2, **skp;
struct tcp_tw_bucket *tw;
for(skp = &(head + tcp_ehash_size)->chain; (sk2=*skp)!=NULL; skp = &sk2->next) {
tw = (struct tcp_tw_bucket*)sk2;
- if(*((__u32 *)&(sk2->dport)) == ports &&
+ if(*((__u32 *)&(tw->dport)) == ports &&
sk2->family == PF_INET6 &&
!ipv6_addr_cmp(&tw->v6_daddr, saddr) &&
!ipv6_addr_cmp(&tw->v6_rcv_saddr, daddr) &&
static int tcp_v6_hash_connecting(struct sock *sk)
{
- unsigned short snum = sk->num;
+ unsigned short snum = inet_sk(sk)->num;
struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(snum)];
struct tcp_bind_bucket *tb = head->chain;
int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_opt *tp = tcp_sk(sk);
struct in6_addr *saddr = NULL;
goto failure;
} else {
ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
- sk->saddr);
+ inet->saddr);
ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
- sk->rcv_saddr);
+ inet->rcv_saddr);
}
return err;
fl.fl6_src = saddr;
fl.oif = sk->bound_dev_if;
fl.uli_u.ports.dport = usin->sin6_port;
- fl.uli_u.ports.sport = sk->sport;
+ fl.uli_u.ports.sport = inet->sport;
if (np->opt && np->opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
/* set the source address */
ipv6_addr_copy(&np->rcv_saddr, saddr);
ipv6_addr_copy(&np->saddr, saddr);
- sk->rcv_saddr= LOOPBACK4_IPV6;
+ inet->rcv_saddr = LOOPBACK4_IPV6;
tp->ext_header_len = 0;
if (np->opt)
if (buff == NULL)
goto failure;
- sk->dport = usin->sin6_port;
+ inet->dport = usin->sin6_port;
/*
* Init variables
if (!tp->write_seq)
tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
np->daddr.s6_addr32,
- sk->sport, sk->dport);
+ inet->sport,
+ inet->dport);
err = tcp_connect(sk, buff);
if (err == 0)
failure:
__sk_dst_reset(sk);
- sk->dport = 0;
+ inet->dport = 0;
sk->route_caps = 0;
return err;
}
dst = __sk_dst_check(sk, np->dst_cookie);
if (dst == NULL) {
+ struct inet_opt *inet = inet_sk(sk);
struct flowi fl;
/* BUGGG_FUTURE: Again, it is not clear how
fl.nl_u.ip6_u.daddr = &np->daddr;
fl.nl_u.ip6_u.saddr = &np->saddr;
fl.oif = sk->bound_dev_if;
- fl.uli_u.ports.dport = sk->dport;
- fl.uli_u.ports.sport = sk->sport;
+ fl.uli_u.ports.dport = inet->dport;
+ fl.uli_u.ports.sport = inet->sport;
dst = ip6_route_output(sk, &fl);
} else
fl.fl6_flowlabel = 0;
fl.oif = req->af.v6_req.iif;
fl.uli_u.ports.dport = req->rmt_port;
- fl.uli_u.ports.sport = sk->sport;
+ fl.uli_u.ports.sport = inet_sk(sk)->sport;
if (dst == NULL) {
opt = np->opt;
if (newsk == NULL)
return NULL;
+ newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
newtp = tcp_sk(newsk);
ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
- newsk->daddr);
+ newinet->daddr);
ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
- newsk->saddr);
+ newinet->saddr);
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
fl.fl6_flowlabel = 0;
fl.oif = sk->bound_dev_if;
fl.uli_u.ports.dport = req->rmt_port;
- fl.uli_u.ports.sport = sk->sport;
+ fl.uli_u.ports.sport = inet_sk(sk)->sport;
dst = ip6_route_output(sk, &fl);
}
newtp->advmss = dst->advmss;
tcp_initialize_rcv_mss(newsk);
- newsk->daddr = LOOPBACK4_IPV6;
- newsk->saddr = LOOPBACK4_IPV6;
- newsk->rcv_saddr = LOOPBACK4_IPV6;
+ newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
__tcp_v6_hash(newsk);
tcp_inherit_port(sk, newsk);
dst = __sk_dst_check(sk, np->dst_cookie);
if (dst == NULL) {
+ struct inet_opt *inet = inet_sk(sk);
struct flowi fl;
fl.proto = IPPROTO_TCP;
fl.nl_u.ip6_u.saddr = &np->saddr;
fl.fl6_flowlabel = np->flow_label;
fl.oif = sk->bound_dev_if;
- fl.uli_u.ports.dport = sk->dport;
- fl.uli_u.ports.sport = sk->sport;
+ fl.uli_u.ports.dport = inet->dport;
+ fl.uli_u.ports.sport = inet->sport;
if (np->opt && np->opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
static int tcp_v6_xmit(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct flowi fl;
struct dst_entry *dst;
fl.fl6_flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
fl.oif = sk->bound_dev_if;
- fl.uli_u.ports.sport = sk->sport;
- fl.uli_u.ports.dport = sk->dport;
+ fl.uli_u.ports.sport = inet->sport;
+ fl.uli_u.ports.dport = inet->dport;
if (np->opt && np->opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
sin6->sin6_family = AF_INET6;
memcpy(&sin6->sin6_addr, &np->daddr, sizeof(struct in6_addr));
- sin6->sin6_port = sk->dport;
+ sin6->sin6_port = inet_sk(sk)->dport;
/* We do not store received flowlabel for TCP */
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = 0;
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3],
- ntohs(sk->sport),
+ ntohs(inet_sk(sk)->sport),
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3],
ntohs(req->rmt_port),
__u16 destp, srcp;
int timer_active;
unsigned long timer_expires;
+ struct inet_opt *inet = inet_sk(sp);
struct tcp_opt *tp = tcp_sk(sp);
struct ipv6_pinfo *np = inet6_sk(sp);
dest = &np->daddr;
src = &np->rcv_saddr;
- destp = ntohs(sp->dport);
- srcp = ntohs(sp->sport);
+ destp = ntohs(inet->dport);
+ srcp = ntohs(inet->sport);
if (tp->pending == TCP_TIME_RETRANS) {
timer_active = 1;
timer_expires = tp->timeout;
best_size_so_far = 32767;
best = result = udp_port_rover;
for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
- struct sock *sk;
+ struct sock *sk2;
int size;
- sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
- if (!sk) {
+ sk2 = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
+ if (!sk2) {
if (result > sysctl_local_port_range[1])
result = sysctl_local_port_range[0] +
((result - sysctl_local_port_range[0]) &
do {
if (++size >= best_size_so_far)
goto next;
- } while ((sk = sk->next) != NULL);
+ } while ((sk2 = sk2->next) != NULL);
best_size_so_far = size;
best = result;
next:;
for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
sk2 != NULL;
sk2 = sk2->next) {
+ struct inet_opt *inet2 = inet_sk(sk2);
struct ipv6_pinfo *np2 = inet6_sk(sk2);
- if (sk2->num == snum &&
+ if (inet2->num == snum &&
sk2 != sk &&
sk2->bound_dev_if == sk->bound_dev_if &&
- (!sk2->rcv_saddr ||
+ (!inet2->rcv_saddr ||
addr_type == IPV6_ADDR_ANY ||
!ipv6_addr_cmp(&np->rcv_saddr, &np2->rcv_saddr) ||
(addr_type == IPV6_ADDR_MAPPED &&
sk2->family == AF_INET &&
- sk->rcv_saddr == sk2->rcv_saddr)) &&
+ inet_sk(sk)->rcv_saddr == inet2->rcv_saddr)) &&
(!sk2->reuse || !sk->reuse))
goto fail;
}
}
- sk->num = snum;
+ inet_sk(sk)->num = snum;
if (sk->pprev == NULL) {
struct sock **skp = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
if ((sk->next = *skp) != NULL)
sk->next->pprev = sk->pprev;
*sk->pprev = sk->next;
sk->pprev = NULL;
- sk->num = 0;
+ inet_sk(sk)->num = 0;
sock_prot_dec_use(sk->prot);
__sock_put(sk);
}
read_lock(&udp_hash_lock);
for(sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
- if((sk->num == hnum) &&
- (sk->family == PF_INET6)) {
+ struct inet_opt *inet = inet_sk(sk);
+
+ if (inet->num == hnum && sk->family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
int score = 0;
- if(sk->dport) {
- if(sk->dport != sport)
+ if (inet->dport) {
+ if (inet->dport != sport)
continue;
score++;
}
int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *daddr;
struct in6_addr saddr;
if (err < 0)
return err;
- ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), sk->daddr);
+ ipv6_addr_set(&np->daddr, 0, 0, htonl(0x0000ffff), inet->daddr);
if (ipv6_addr_any(&np->saddr)) {
ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000ffff),
- sk->saddr);
+ inet->saddr);
}
if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000ffff),
- sk->rcv_saddr);
+ inet->rcv_saddr);
}
return 0;
}
ipv6_addr_copy(&np->daddr, daddr);
np->flow_label = fl.fl6_flowlabel;
- sk->dport = usin->sin6_port;
+ inet->dport = usin->sin6_port;
/*
* Check for a route to destination and obtain the
fl.fl6_dst = &np->daddr;
fl.fl6_src = &saddr;
fl.oif = sk->bound_dev_if;
- fl.uli_u.ports.dport = sk->dport;
- fl.uli_u.ports.sport = sk->sport;
+ fl.uli_u.ports.dport = inet->dport;
+ fl.uli_u.ports.sport = inet->sport;
if (flowlabel) {
if (flowlabel->opt && flowlabel->opt->srcrt) {
if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_copy(&np->rcv_saddr, &saddr);
- sk->rcv_saddr = LOOPBACK4_IPV6;
+ inet->rcv_saddr = LOOPBACK4_IPV6;
}
sk->state = TCP_ESTABLISHED;
}
struct sock *s = sk;
unsigned short num = ntohs(loc_port);
for(; s; s = s->next) {
- if(s->num == num) {
+ struct inet_opt *inet = inet_sk(s);
+
+ if (inet->num == num) {
struct ipv6_pinfo *np = inet6_sk(s);
- if(s->dport) {
- if(s->dport != rmt_port)
+ if (inet->dport) {
+ if (inet->dport != rmt_port)
continue;
}
if (!ipv6_addr_any(&np->daddr) &&
{
struct ipv6_txoptions opt_space;
struct udpv6fakehdr udh;
+ struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct ipv6_txoptions *opt = NULL;
if (sk->state != TCP_ESTABLISHED)
return -ENOTCONN;
- udh.uh.dest = sk->dport;
+ udh.uh.dest = inet->dport;
daddr = &np->daddr;
fl.fl6_flowlabel = np->flow_label;
}
if (opt && opt->srcrt)
udh.daddr = daddr;
- udh.uh.source = sk->sport;
+ udh.uh.source = inet->sport;
udh.uh.len = len < 0x10000 ? htons(len) : 0;
udh.uh.check = 0;
udh.iov = msg->msg_iov;
static void get_udp6_sock(struct sock *sp, char *tmpbuf, int i)
{
+ struct inet_opt *inet = inet_sk(sp);
struct ipv6_pinfo *np = inet6_sk(sp);
struct in6_addr *dest, *src;
__u16 destp, srcp;
dest = &np->daddr;
src = &np->rcv_saddr;
- destp = ntohs(sp->dport);
- srcp = ntohs(sp->sport);
+ destp = ntohs(inet->dport);
+ srcp = ntohs(inet->sport);
sprintf(tmpbuf,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
spinlock_t bind_lock;
char running; /* prot_hook is attached*/
int ifindex; /* bound device */
+ unsigned short num;
struct tpacket_stats stats;
#ifdef CONFIG_PACKET_MULTICAST
struct packet_mclist *mclist;
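As with econet and pppox earlier in the patch, AF_PACKET grows a private
num now that the field has moved into struct inet_opt. The create and bind
hunks below all converge on the same pattern (pkt_sk() is the pre-existing
protinfo accessor):

	struct packet_opt *po = pkt_sk(sk);

	po->num = protocol;		/* was: sk->num = protocol; */
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;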
*/
if (saddr == NULL) {
- ifindex = pkt_sk(sk)->ifindex;
- proto = sk->num;
+ struct packet_opt *po = pkt_sk(sk);
+
+ ifindex = po->ifindex;
+ proto = po->num;
addr = NULL;
} else {
err = -EINVAL;
po->running = 0;
}
- sk->num = protocol;
+ po->num = protocol;
po->prot_hook.type = protocol;
po->prot_hook.dev = dev;
dev = dev_get_by_name(name);
if (dev) {
- err = packet_do_bind(sk, dev, sk->num);
+ err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
dev_put(dev);
}
return err;
if (dev == NULL)
goto out;
}
- err = packet_do_bind(sk, dev, sll->sll_protocol ? : sk->num);
+ err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
if (dev)
dev_put(dev);
goto out_free;
memset(po, 0, sizeof(*po));
sk->family = PF_PACKET;
- sk->num = protocol;
+ po->num = protocol;
sk->destruct = packet_sock_destruct;
atomic_inc(&packet_socks_nr);
sll->sll_family = AF_PACKET;
sll->sll_ifindex = po->ifindex;
- sll->sll_protocol = sk->num;
+ sll->sll_protocol = po->num;
dev = dev_get_by_index(po->ifindex);
if (dev) {
sll->sll_hatype = dev->type;
break;
case NETDEV_UP:
spin_lock(&po->bind_lock);
- if (dev->ifindex == po->ifindex && sk->num && po->running==0) {
+ if (dev->ifindex == po->ifindex && po->num &&
+ !po->running) {
dev_add_pack(&po->prot_hook);
sock_hold(sk);
po->running = 1;
s,
atomic_read(&s->refcnt),
s->type,
- ntohs(s->num),
+ ntohs(po->num),
po->ifindex,
po->running,
atomic_read(&s->rmem_alloc),
/* Register socket with portmapper */
if (*errp >= 0 && pmap_register)
- *errp = svc_register(serv, inet->protocol, ntohs(inet->sport));
+ *errp = svc_register(serv, inet->protocol,
+ ntohs(inet_sk(inet)->sport));
if (*errp < 0) {
inet->user_data = NULL;