int iif;
};
-static __inline__ dn_address dn_eth2dn(unsigned char *ethaddr)
+static inline dn_address dn_eth2dn(unsigned char *ethaddr)
{
return ethaddr[4] | (ethaddr[5] << 8);
}
-static __inline__ dn_address dn_saddr2dn(struct sockaddr_dn *saddr)
+static inline dn_address dn_saddr2dn(struct sockaddr_dn *saddr)
{
return *(dn_address *)saddr->sdn_nodeaddr;
}
-static __inline__ void dn_dn2eth(unsigned char *ethaddr, dn_address addr)
+static inline void dn_dn2eth(unsigned char *ethaddr, dn_address addr)
{
ethaddr[0] = 0xAA;
ethaddr[1] = 0x00;
ethaddr[5] = (unsigned char)(addr >> 8);
}
+/*
+ * Copy the DECnet "port" information from a socket's session control
+ * parameters (scp) into the flow lookup key. If no object number is
+ * set (objnum == 0) the object is identified by name instead, so the
+ * name and its length are copied as well.
+ */
+static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp)
+{
+	fl->uli_u.dnports.sport = scp->addrloc;
+	fl->uli_u.dnports.dport = scp->addrrem;
+	fl->uli_u.dnports.objnum = scp->addr.sdn_objnum;
+	if (fl->uli_u.dnports.objnum == 0) {
+		fl->uli_u.dnports.objnamel = scp->addr.sdn_objnamel;
+		memcpy(fl->uli_u.dnports.objname, scp->addr.sdn_objname, 16);
+	}
+}
+
+extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu);
+
#define DN_MENUVER_ACC 0x01
#define DN_MENUVER_USR 0x02
#define DN_MENUVER_PRX 0x04
struct dn_ifaddr *ifa_next;
struct dn_dev *ifa_dev;
dn_address ifa_local;
+ dn_address ifa_address;
unsigned char ifa_flags;
unsigned char ifa_scope;
char ifa_label[IFNAMSIZ];
extern struct net_device *dn_dev_get_default(void);
extern int dn_dev_bind_default(dn_address *addr);
-static __inline__ int dn_dev_islocal(struct net_device *dev, dn_address addr)
+extern int register_dnaddr_notifier(struct notifier_block *nb);
+extern int unregister_dnaddr_notifier(struct notifier_block *nb);
+
+static inline int dn_dev_islocal(struct net_device *dev, dn_address addr)
{
struct dn_dev *dn_db = dev->dn_ptr;
struct dn_ifaddr *ifa;
#ifndef _NET_DN_FIB_H
#define _NET_DN_FIB_H
-#include <linux/config.h>
-
-#ifdef CONFIG_DECNET_ROUTER
-
-#include <linux/rtnetlink.h>
-
struct dn_kern_rta
{
void *rta_dst;
struct rta_cacheinfo *rta_ci;
};
-struct dn_fib_key {
- dn_address src;
- dn_address dst;
- int iif;
- int oif;
- u32 fwmark;
- unsigned char scope;
-};
-
struct dn_fib_res {
struct dn_fib_rule *r;
struct dn_fib_info *fi;
unsigned fib_flags;
int fib_protocol;
dn_address fib_prefsrc;
- u32 fib_priority;
+ __u32 fib_priority;
+ __u32 fib_metrics[RTAX_MAX];
+#define dn_fib_mtu fib_metrics[RTAX_MTU-1]
+#define dn_fib_window fib_metrics[RTAX_WINDOW-1]
+#define dn_fib_rtt fib_metrics[RTAX_RTT-1]
+#define dn_fib_advmss fib_metrics[RTAX_ADVMSS-1]
int fib_nhs;
int fib_power;
struct dn_fib_nh fib_nh[0];
-#define fib_dev fib_nh[0].nh_dev
+#define dn_fib_dev fib_nh[0].nh_dev
};
-#define DN_FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
#define DN_FIB_RES_RESET(res) ((res).nh_sel = 0)
+#define DN_FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
+
+#define DN_FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __dn_fib_res_prefsrc(&res))
#define DN_FIB_RES_GW(res) (DN_FIB_RES_NH(res).nh_gw)
#define DN_FIB_RES_DEV(res) (DN_FIB_RES_NH(res).nh_dev)
#define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif)
int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
struct dn_kern_rta *rta, struct nlmsghdr *n,
struct netlink_skb_parms *req);
- int (*lookup)(struct dn_fib_table *t, const struct dn_fib_key *key,
+ int (*lookup)(struct dn_fib_table *t, const struct flowi *fl,
struct dn_fib_res *res);
int (*flush)(struct dn_fib_table *t);
#ifdef CONFIG_PROC_FS
unsigned char data[0];
};
-
+#ifdef CONFIG_DECNET_ROUTER
/*
* dn_fib.c
*/
struct dn_kern_rta *rta,
const struct nlmsghdr *nlh, int *errp);
extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
- const struct dn_fib_key *key, struct dn_fib_res *res);
+ const struct flowi *fl,
+ struct dn_fib_res *res);
extern void dn_fib_release_info(struct dn_fib_info *fi);
extern u16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type);
extern void dn_fib_flush(void);
-extern void dn_fib_select_multipath(const struct dn_fib_key *key,
+extern void dn_fib_select_multipath(const struct flowi *fl,
struct dn_fib_res *res);
extern int dn_fib_sync_down(dn_address local, struct net_device *dev,
int force);
extern void dn_fib_rules_init(void);
extern void dn_fib_rules_cleanup(void);
extern void dn_fib_rule_put(struct dn_fib_rule *);
-extern int dn_fib_lookup(struct dn_fib_key *key, struct dn_fib_res *res);
+extern __u16 dn_fib_rules_policy(__u16 saddr, struct dn_fib_res *res, unsigned *flags);
+extern unsigned dnet_addr_type(__u16 addr);
+extern int dn_fib_lookup(const struct flowi *fl, struct dn_fib_res *res);
/*
* rtnetlink interface
extern int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb);
-#define DN_NUM_TABLES 255
-#define DN_MIN_TABLE 1
-#define DN_DEFAULT_TABLE 1
-#define DN_L1_TABLE 1
-#define DN_L2_TABLE 2
-
extern void dn_fib_free_info(struct dn_fib_info *fi);
-static __inline__ void dn_fib_info_put(struct dn_fib_info *fi)
+static inline void dn_fib_info_put(struct dn_fib_info *fi)
{
if (atomic_dec_and_test(&fi->fib_clntref))
dn_fib_free_info(fi);
}
-static __inline__ void dn_fib_res_put(struct dn_fib_res *res)
+static inline void dn_fib_res_put(struct dn_fib_res *res)
{
if (res->fi)
dn_fib_info_put(res->fi);
dn_fib_rule_put(res->r);
}
-static __inline__ u16 dnet_make_mask(int n)
+extern struct dn_fib_table *dn_fib_tables[];
+
+#else /* Endnode */
+
+#define dn_fib_lookup(fl, res) (-ESRCH)
+#define dn_fib_info_put(fi) do { } while(0)
+#define dn_fib_select_multipath(fl, res) do { } while(0)
+#define dn_fib_rules_policy(saddr,res,flags) (0)
+#define dn_fib_res_put(res) do { } while(0)
+
+#endif /* CONFIG_DECNET_ROUTER */
+
+static inline u16 dnet_make_mask(int n)
{
if (n)
return htons(~((1<<(16-n))-1));
return 0;
}
-#endif /* CONFIG_DECNET_ROUTER */
-
#endif /* _NET_DN_FIB_H */
extern void dn_neigh_init(void);
extern void dn_neigh_cleanup(void);
-extern struct neighbour *dn_neigh_lookup(struct neigh_table *tbl, void *ptr);
+extern struct neighbour *dn_neigh_lookup(struct neigh_table *tbl, const void *ptr);
extern int dn_neigh_router_hello(struct sk_buff *skb);
extern int dn_neigh_endnode_hello(struct sk_buff *skb);
extern void dn_neigh_pointopoint_hello(struct sk_buff *skb);
return atomic_read(&sk->rmem_alloc) > (sk->rcvbuf >> 1);
}
+#define DN_MAX_NSP_DATA_HEADER (11)
+
#endif /* _NET_DN_NSP_H */
*******************************************************************************/
extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri);
-extern int dn_route_output(struct dst_entry **pprt, dn_address dst, dn_address src, int flags);
+extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags);
extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern void dn_rt_cache_flush(int delay);
#define DN_RT_INFO_BLKR 0x40 /* Blocking Requested */
/*
- * The key structure is what we used to look up the route.
+ * The fl structure is what we used to look up the route.
* The rt_saddr & rt_daddr entries are the same as key.saddr & key.daddr
- * except for local input routes, where the rt_saddr = key.daddr and
- * rt_daddr = key.saddr to allow the route to be used for returning
+ * except for local input routes, where the rt_saddr = fl.fld_dst and
+ * rt_daddr = fl.fld_src to allow the route to be used for returning
* packets to the originating host.
*/
struct dn_route {
struct dst_entry dst;
struct dn_route *rt_next;
} u;
- struct {
- unsigned short saddr;
- unsigned short daddr;
- int iif;
- int oif;
- u32 fwmark;
- } key;
- unsigned short rt_saddr;
- unsigned short rt_daddr;
- unsigned char rt_type;
- unsigned char rt_scope;
- unsigned char rt_protocol;
- unsigned char rt_table;
+
+ __u16 rt_saddr;
+ __u16 rt_daddr;
+ __u16 rt_gateway;
+ __u16 __padding;
+ __u16 rt_src_map;
+ __u16 rt_dst_map;
+
+ unsigned rt_flags;
+ unsigned rt_type;
+
+ struct flowi fl;
};
extern void dn_route_init(void);
kfree_skb(skb);
}
-static inline void dn_nsp_send(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
- struct dn_scp *scp = DN_SK(sk);
- struct dst_entry *dst;
-
- skb->h.raw = skb->data;
- scp->stamp = jiffies;
-
- if ((dst = sk->dst_cache) && !dst->obsolete) {
-try_again:
- skb->dst = dst_clone(dst);
- dst_output(skb);
- return;
- }
-
- dst_release(xchg(&sk->dst_cache, NULL));
-
- if (dn_route_output(&sk->dst_cache, dn_saddr2dn(&scp->peer), dn_saddr2dn(&scp->addr), 0) == 0) {
- dst = sk->dst_cache;
- goto try_again;
- }
-
- sk->err = EHOSTUNREACH;
- if (!test_bit(SOCK_DEAD, &sk->flags))
- sk->state_change(sk);
-}
-
#endif /* _NET_DN_ROUTE_H */
struct in6_addr * saddr;
__u32 flowlabel;
} ip6_u;
+
+ struct {
+ __u16 daddr;
+ __u16 saddr;
+ __u32 fwmark;
+ __u8 scope;
+ } dn_u;
} nl_u;
+#define fld_dst nl_u.dn_u.daddr
+#define fld_src nl_u.dn_u.saddr
+#define fld_fwmark nl_u.dn_u.fwmark
+#define fld_scope nl_u.dn_u.scope
#define fl6_dst nl_u.ip6_u.daddr
#define fl6_src nl_u.ip6_u.saddr
#define fl6_flowlabel nl_u.ip6_u.flowlabel
__u8 code;
} icmpt;
+ struct {
+ __u16 sport;
+ __u16 dport;
+ __u8 objnum;
+ __u8 objnamel; /* Not 16 bits since max val is 16 */
+ __u8 objname[16]; /* Not zero terminated */
+ } dnports;
+
__u32 spi;
} uli_u;
#define fl_ip_sport uli_u.ports.sport
* schedule();
* SOCK_SLEEP_POST(sk)
*
+ * N.B. These are now obsolete and were, afaik, only ever used in DECnet
+ * and when the last use of them in DECnet has gone, I'm intending to
+ * remove them.
*/
#define SOCK_SLEEP_PRE(sk) { struct task_struct *tsk = current; \
o Start to hack together user level software and add more DECnet support
in ifconfig for example.
- o Test adding/deleting of routes
-
- o Test route lookup
-
- o Test /proc/net/decnet_route route listing works correctly (maybe I'll
- change the format of this file... atm its very similar to the IPv4 route
- file)
-
o Find all the commonality between DECnet and IPv4 routing code and extract
it into a small library of routines. [probably a project for 2.7.xx]
- o Test ip_gre tunneling works... it did the last time I tested it and it
- will have to if I'm to test routing properly.
-
o Add the routing message grabbing netfilter module [written, tested,
awaiting merge]
- o Add perfect socket hashing - an idea suggested by Paul Koning [part written,
- awaiting debugging and merge]
+ o Add perfect socket hashing - an idea suggested by Paul Koning. Currently
+ we have a half-way house scheme which seems to work reasonably well, but
+   the full scheme is still worth implementing, but it's not top of my list
+ right now.
o Add session control message flow control
o AIO for DECnet
+ o Eliminate dn_db->parms.blksize
+
#include <linux/netfilter.h>
#include <net/sock.h>
#include <net/tcp.h>
+#include <net/flow.h>
#include <asm/system.h>
#include <asm/ioctls.h>
#include <linux/mm.h>
#define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
#define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
+
static kmem_cache_t *dn_sk_cachep;
static struct proto_ops dn_proto_ops;
static rwlock_t dn_hash_lock = RW_LOCK_UNLOCKED;
if (hash == 0) {
hash = addr->sdn_objnamel;
- for(i = 0; i < addr->sdn_objnamel; i++) {
+ for(i = 0; i < dn_ntohs(addr->sdn_objnamel); i++) {
hash ^= addr->sdn_objname[i];
hash ^= (hash << 3);
}
scp->services_loc = 1 | NSP_FC_NONE;
scp->info_rem = 0;
scp->info_loc = 0x03; /* NSP version 4.1 */
- scp->segsize_rem = 230; /* Default: Updated by remote segsize */
- scp->segsize_loc = 1450; /* Best guess for ethernet */
+ scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
scp->nonagle = 0;
scp->multi_ireq = 1;
scp->accept_mode = ACC_IMMED;
return -EINVAL;
#if 1
- if ((!capable(CAP_NET_BIND_SERVICE) && saddr->sdn_objnum) ||
- (saddr->sdn_flags & SDF_WILD))
+ if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
+ (saddr->sdn_flags & SDF_WILD)))
return -EACCES;
#else
/*
static int dn_confirm_accept(struct sock *sk, long *timeo, int allocation)
{
struct dn_scp *scp = DN_SK(sk);
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
int err;
if (scp->state != DN_CR)
return -EINVAL;
scp->state = DN_CC;
+ scp->segsize_loc = dst_path_metric(__sk_dst_get(sk), RTAX_ADVMSS);
dn_send_conn_conf(sk, allocation);
- add_wait_queue(sk->sleep, &wait);
+ prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
for(;;) {
- set_current_state(TASK_INTERRUPTIBLE);
release_sock(sk);
if (scp->state == DN_CC)
*timeo = schedule_timeout(*timeo);
err = -EAGAIN;
if (!*timeo)
break;
+ prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
+ }
+ finish_wait(sk->sleep, &wait);
+ if (err == 0) {
+ sk->socket->state = SS_CONNECTED;
+ } else if (scp->state != DN_CC) {
+ sk->socket->state = SS_UNCONNECTED;
}
- remove_wait_queue(sk->sleep, &wait);
- current->state = TASK_RUNNING;
return err;
}
static int dn_wait_run(struct sock *sk, long *timeo)
{
struct dn_scp *scp = DN_SK(sk);
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
int err = 0;
if (scp->state == DN_RUN)
if (!*timeo)
return -EALREADY;
- add_wait_queue(sk->sleep, &wait);
+ prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
for(;;) {
- set_current_state(TASK_INTERRUPTIBLE);
release_sock(sk);
if (scp->state == DN_CI || scp->state == DN_CC)
*timeo = schedule_timeout(*timeo);
err = -ETIMEDOUT;
if (!*timeo)
break;
+ prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
}
- remove_wait_queue(sk->sleep, &wait);
- current->state = TASK_RUNNING;
+ finish_wait(sk->sleep, &wait);
out:
if (err == 0) {
sk->socket->state = SS_CONNECTED;
+ } else if (scp->state != DN_CI && scp->state != DN_CC) {
+ sk->socket->state = SS_UNCONNECTED;
}
return err;
}
struct socket *sock = sk->socket;
struct dn_scp *scp = DN_SK(sk);
int err = -EISCONN;
+ struct flowi fl;
if (sock->state == SS_CONNECTED)
goto out;
memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
err = -EHOSTUNREACH;
- if (dn_route_output(&sk->dst_cache, dn_saddr2dn(&scp->peer),
- dn_saddr2dn(&scp->addr), flags & MSG_TRYHARD) < 0)
+ memset(&fl, 0, sizeof(fl));
+ fl.oif = sk->bound_dev_if;
+ fl.fld_dst = dn_saddr2dn(&scp->peer);
+ fl.fld_src = dn_saddr2dn(&scp->addr);
+ dn_sk_ports_copy(&fl, scp);
+ if (dn_route_output_sock(&sk->dst_cache, &fl, sk, flags) < 0)
goto out;
-
+ sk->route_caps = sk->dst_cache->dev->features;
sock->state = SS_CONNECTING;
scp->state = DN_CI;
+ scp->segsize_loc = dst_path_metric(sk->dst_cache, RTAX_ADVMSS);
dn_nsp_send_conninit(sk, NSP_CI);
err = -EINPROGRESS;
static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
struct sk_buff *skb = NULL;
int err = 0;
- add_wait_queue_exclusive(sk->sleep, &wait);
+ prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
for(;;) {
- set_current_state(TASK_INTERRUPTIBLE);
release_sock(sk);
skb = skb_dequeue(&sk->receive_queue);
if (skb == NULL) {
err = -EAGAIN;
if (!*timeo)
break;
+ prepare_to_wait(sk->sleep, &wait, TASK_INTERRUPTIBLE);
}
- remove_wait_queue(sk->sleep, &wait);
- current->state = TASK_RUNNING;
+ finish_wait(sk->sleep, &wait);
+
return skb == NULL ? ERR_PTR(err) : skb;
}
release_sock(sk);
return val;
-#ifdef CONFIG_DECNET_ROUTER
- case SIOCADDRT:
- case SIOCDELRT:
- return dn_fib_ioctl(sock, cmd, arg);
-#endif /* CONFIG_DECNET_ROUTER */
-
case TIOCOUTQ:
amount = sk->sndbuf - atomic_read(&sk->wmem_alloc);
if (amount < 0)
return 0;
}
+/*
+ * The DECnet spec requires that the "routing layer" accepts packets which
+ * are at least 230 bytes in size. This excludes any headers which the NSP
+ * layer might add, so we always assume that we'll be using the maximal
+ * length header on data packets. The variation in length is due to the
+ * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
+ * make much practical difference.
+ */
+unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
+{
+	/* Floor: spec-mandated 230 byte minimum less the worst-case NSP header. */
+	unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
+	if (dev) {
+		struct dn_dev *dn_db = dev->dn_ptr;
+		/* Subtract link layer framing overhead for this device... */
+		mtu -= LL_RESERVED_SPACE(dev);
+		/* ...and the routing header: 21 bytes long format, 6 short. */
+		if (dn_db->use_long)
+			mtu -= 21;
+		else
+			mtu -= 6;
+		mtu -= DN_MAX_NSP_DATA_HEADER;
+	} else {
+		/*
+		 * 21 = long header, 16 = guess at MAC header length
+		 */
+		mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
+	}
+	/* Never report less than the spec minimum computed above. */
+	if (mtu > mss)
+		mss = mtu;
+	return mss;
+}
+
+/*
+ * Work out the maximum segment size currently usable on this socket:
+ * the smaller of the locally and remotely advertised segment sizes,
+ * further capped by the path MTU when a route is cached in the socket.
+ */
+static inline unsigned int dn_current_mss(struct sock *sk, int flags)
+{
+	struct dst_entry *dst = __sk_dst_get(sk);
+	struct dn_scp *scp = DN_SK(sk);
+	int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
+
+	/* Other data messages are limited to 16 bytes per packet */
+	if (flags & MSG_OOB)
+		return 16;
+
+	/* This works out the maximum size of segment we can send out */
+	if (dst) {
+		u32 mtu = dst_pmtu(dst);
+		mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
+	}
+
+	return mss_now;
+}
+
+/*
+ * Map a send-path error for return to userspace: for -EPIPE prefer any
+ * pending socket error, and raise SIGPIPE on the current task unless
+ * the caller passed MSG_NOSIGNAL.
+ */
+static int dn_error(struct sock *sk, int flags, int err)
+{
+	if (err == -EPIPE)
+		err = sock_error(sk) ? : -EPIPE;
+	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
+		send_sig(SIGPIPE, current, 0);
+	return err;
+}
+
static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, int size)
{
struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name;
struct sk_buff *skb = NULL;
struct dn_skb_cb *cb;
- unsigned char msgflg;
- unsigned char *ptr;
- unsigned short ack;
int len;
unsigned char fctype;
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
return -EINVAL;
+ /*
+ * The only difference between stream sockets and sequenced packet
+ * sockets is that the stream sockets always behave as if MSG_EOR
+ * has been set.
+ */
+ if (sock->type == SOCK_STREAM) {
+ if (flags & MSG_EOR)
+ return -EINVAL;
+ flags |= MSG_EOR;
+ }
+
lock_sock(sk);
err = dn_check_state(sk, addr, addr_len, &timeo, flags);
if (err)
- goto out;
+ goto out_err;
if (sk->shutdown & SEND_SHUTDOWN) {
- if (!(flags & MSG_NOSIGNAL))
- send_sig(SIGPIPE, current, 0);
err = -EPIPE;
- goto out;
+ goto out_err;
}
if ((flags & MSG_TRYHARD) && sk->dst_cache)
mss = scp->segsize_rem;
fctype = scp->services_rem & NSP_FC_MASK;
- if (sk->dst_cache && sk->dst_cache->neighbour) {
- struct dn_neigh *dn = (struct dn_neigh *)sk->dst_cache->neighbour;
- if (dn->blksize < (mss + 11))
- mss = dn->blksize - 11;
- }
-
- /*
- * The only difference between SEQPACKET & STREAM sockets under DECnet
- * is that SEQPACKET sockets set the MSG_EOR flag for the last
- * session control message segment.
- */
+ mss = dn_current_mss(sk, flags);
if (flags & MSG_OOB) {
- mss = 16;
queue = &scp->other_xmit_queue;
if (size > mss) {
err = -EMSGSIZE;
cb = DN_SKB_CB(skb);
- ptr = skb_put(skb, 9);
+ skb_reserve(skb, DN_MAX_NSP_DATA_HEADER);
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
}
if (flags & MSG_OOB) {
- cb->segnum = scp->numoth;
- seq_add(&scp->numoth, 1);
- msgflg = 0x30;
- ack = (scp->numoth_rcv & 0x0FFF) | 0x8000;
- scp->ackxmt_oth = scp->numoth_rcv;
+ cb->nsp_flags = 0x30;
if (fctype != NSP_FC_NONE)
scp->flowrem_oth--;
} else {
- cb->segnum = scp->numdat;
- seq_add(&scp->numdat, 1);
- msgflg = 0x00;
- if (sock->type == SOCK_STREAM)
- msgflg = 0x60;
+ cb->nsp_flags = 0x00;
if (scp->seg_total == 0)
- msgflg |= 0x20;
+ cb->nsp_flags |= 0x20;
scp->seg_total += len;
if (((sent + len) == size) && (flags & MSG_EOR)) {
- msgflg |= 0x40;
+ cb->nsp_flags |= 0x40;
scp->seg_total = 0;
if (fctype == NSP_FC_SCMC)
scp->flowrem_dat--;
}
- ack = (scp->numdat_rcv & 0x0FFF) | 0x8000;
- scp->ackxmt_dat = scp->numdat_rcv;
if (fctype == NSP_FC_SRC)
scp->flowrem_dat--;
}
- *ptr++ = msgflg;
- *(__u16 *)ptr = scp->addrrem;
- ptr += 2;
- *(__u16 *)ptr = scp->addrloc;
- ptr += 2;
- *(__u16 *)ptr = dn_htons(ack);
- ptr += 2;
- *(__u16 *)ptr = dn_htons(cb->segnum);
-
sent += len;
dn_nsp_queue_xmit(sk, skb, sk->allocation, flags & MSG_OOB);
skb = NULL;
release_sock(sk);
return sent ? sent : err;
+
+out_err:
+ err = dn_error(sk, flags, err);
+ release_sock(sk);
+ return err;
}
static int dn_device_event(struct notifier_block *this, unsigned long event,
.sendpage = sock_no_sendpage,
};
-#ifdef CONFIG_SYSCTL
void dn_register_sysctl(void);
void dn_unregister_sysctl(void);
-#endif
MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
MODULE_LICENSE("GPL");
-static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.40s (C) 1995-2002 Linux DECnet Project Team\n";
+static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.67s (C) 1995-2003 Linux DECnet Project Team\n";
static int __init decnet_init(void)
{
dn_fib_init();
#endif /* CONFIG_DECNET_ROUTER */
-#ifdef CONFIG_SYSCTL
dn_register_sysctl();
-#endif /* CONFIG_SYSCTL */
/*
* Prevent DECnet module unloading until its fixed properly.
sock_unregister(AF_DECnet);
dev_remove_pack(&dn_dix_packet_type);
-#ifdef CONFIG_SYSCTL
dn_unregister_sysctl();
-#endif /* CONFIG_SYSCTL */
unregister_netdevice_notifier(&dn_dev_notifier);
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/sysctl.h>
+#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <net/neighbour.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_route.h>
static rwlock_t dndev_lock = RW_LOCK_UNLOCKED;
static struct net_device *decnet_default_device;
+static struct notifier_block *dnaddr_chain;
static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
static void dn_dev_delete(struct net_device *dev);
}
rtmsg_ifa(RTM_DELADDR, ifa1);
-
+ notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
if (destroy) {
dn_dev_free_ifa(ifa1);
dn_db->ifa_list = ifa;
rtmsg_ifa(RTM_NEWADDR, ifa);
+ notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
return 0;
}
dn_dev_del_ifa(dn_db, ifap, 0);
}
- ifa->ifa_local = dn_saddr2dn(sdn);
+ ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);
ret = dn_dev_set_ifa(dev, ifa);
}
if ((ifa = dn_dev_alloc_ifa()) == NULL)
return -ENOBUFS;
+ if (!rta[IFA_ADDRESS - 1])
+ rta[IFA_ADDRESS - 1] = rta[IFA_LOCAL - 1];
memcpy(&ifa->ifa_local, RTA_DATA(rta[IFA_LOCAL-1]), 2);
+ memcpy(&ifa->ifa_address, RTA_DATA(rta[IFA_ADDRESS-1]), 2);
ifa->ifa_flags = ifm->ifa_flags;
ifa->ifa_scope = ifm->ifa_scope;
ifa->ifa_dev = dn_db;
ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT;
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
- RTA_PUT(skb, IFA_LOCAL, 2, &ifa->ifa_local);
+ if (ifa->ifa_address)
+ RTA_PUT(skb, IFA_ADDRESS, 2, &ifa->ifa_address);
+ if (ifa->ifa_local)
+ RTA_PUT(skb, IFA_LOCAL, 2, &ifa->ifa_local);
if (ifa->ifa_label[0])
RTA_PUT(skb, IFA_LABEL, IFNAMSIZ, &ifa->ifa_label);
nlh->nlmsg_len = skb->tail - b;
s_idx = cb->args[0];
s_dn_idx = dn_idx = cb->args[1];
read_lock(&dev_base_lock);
- for(dev = dev_base, idx = 0; dev; dev = dev->next) {
- if ((dn_db = dev->dn_ptr) == NULL)
- continue;
- idx++;
+ for(dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
if (idx < s_idx)
continue;
if (idx > s_idx)
if (dn_idx < s_dn_idx)
continue;
- if (dn_dev_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWADDR) <= 0)
+ if (dn_dev_fill_ifaddr(skb, ifa,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWADDR) <= 0)
goto done;
}
}
}
-#ifdef CONFIG_DECNET_ROUTER
-
#define DRDELAY (5 * HZ)
static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa)
else
dn_send_router_hello(dev, ifa);
}
-#else
-static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
-{
- dn_send_endnode_hello(dev, ifa);
-}
-#endif
#if 0
static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
if ((ifa = dn_dev_alloc_ifa()) == NULL)
return;
- ifa->ifa_local = addr;
+ ifa->ifa_local = ifa->ifa_address = addr;
ifa->ifa_flags = 0;
ifa->ifa_scope = RT_SCOPE_UNIVERSE;
strcpy(ifa->ifa_label, dev->name);
rtnl_unlock();
}
+/* Register a callback on the DECnet address notifier chain
+ * (called with NETDEV_UP/NETDEV_DOWN when interface addresses change). */
+int register_dnaddr_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_register(&dnaddr_chain, nb);
+}
+
+/* Remove a callback previously added with register_dnaddr_notifier(). */
+int unregister_dnaddr_notifier(struct notifier_block *nb)
+{
+	return notifier_chain_unregister(&dnaddr_chain, nb);
+}
#ifdef CONFIG_DECNET_SIOCGIFCONF
/*
static struct rtnetlink_link dnet_rtnetlink_table[RTM_MAX-RTM_BASE+1] =
{
- { NULL, NULL, },
- { NULL, NULL, },
- { NULL, NULL, },
- { NULL, NULL, },
-
- { dn_dev_rtm_newaddr, NULL, },
- { dn_dev_rtm_deladdr, NULL, },
- { NULL, dn_dev_dump_ifaddr, },
- { NULL, NULL, },
+ [4] = { .doit = dn_dev_rtm_newaddr, },
+ [5] = { .doit = dn_dev_rtm_deladdr, },
+ [6] = { .dumpit = dn_dev_dump_ifaddr, },
#ifdef CONFIG_DECNET_ROUTER
- { dn_fib_rtm_newroute, NULL, },
- { dn_fib_rtm_delroute, NULL, },
- { dn_cache_getroute, dn_fib_dump, },
- { NULL, NULL, },
+ [8] = { .doit = dn_fib_rtm_newroute, },
+ [9] = { .doit = dn_fib_rtm_delroute, },
+ [10] = { .doit = dn_cache_getroute, .dumpit = dn_fib_dump, },
+ [16] = { .doit = dn_fib_rtm_newrule, },
+ [17] = { .doit = dn_fib_rtm_delrule, },
+ [18] = { .dumpit = dn_fib_dump_rules, },
#else
- { NULL, NULL, },
- { NULL, NULL, },
- { dn_cache_getroute, dn_cache_dump, },
- { NULL, NULL, },
+ [10] = { .doit = dn_cache_getroute, .dumpit = dn_cache_dump, },
#endif
- { NULL, NULL, },
- { NULL, NULL, },
- { NULL, NULL, },
- { NULL, NULL, },
-#ifdef CONFIG_DECNET_ROUTER
- { dn_fib_rtm_newrule, NULL, },
- { dn_fib_rtm_delrule, NULL, },
- { NULL, dn_fib_dump_rules, },
- { NULL, NULL, }
-#else
- { NULL, NULL, },
- { NULL, NULL, },
- { NULL, NULL, },
- { NULL, NULL, }
-#endif
};
#ifdef MODULE
* Alexey Kuznetsov : SMP locking changes
* Steve Whitehouse : Rewrote it... Well to be more correct, I
* copied most of it from the ipv4 fib code.
+ * Steve Whitehouse : Updated it in style and fixed a few bugs
+ * which were fixed in the ipv4 code since
+ * this code was copied from it.
*
*/
#include <linux/config.h>
#include <asm/uaccess.h>
#include <net/neighbour.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_route.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>
+#define RT_MIN_TABLE 1
#define for_fib_info() { struct dn_fib_info *fi;\
for(fi = dn_fib_info_list; fi; fi = fi->fib_next)
extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-
+static spinlock_t dn_fib_multipath_lock = SPIN_LOCK_UNLOCKED;
static struct dn_fib_info *dn_fib_info_list;
static rwlock_t dn_fib_info_lock = RW_LOCK_UNLOCKED;
int dn_fib_info_cnt;
int error;
u8 scope;
} dn_fib_props[RTA_MAX+1] = {
- { 0, RT_SCOPE_NOWHERE }, /* RTN_UNSPEC */
- { 0, RT_SCOPE_UNIVERSE }, /* RTN_UNICAST */
- { 0, RT_SCOPE_HOST }, /* RTN_LOCAL */
- { -EINVAL, RT_SCOPE_NOWHERE }, /* RTN_BROADCAST */
- { -EINVAL, RT_SCOPE_NOWHERE }, /* RTN_ANYCAST */
- { -EINVAL, RT_SCOPE_NOWHERE }, /* RTN_MULTICAST */
- { -EINVAL, RT_SCOPE_UNIVERSE }, /* RTN_BLACKHOLE */
- { -EHOSTUNREACH, RT_SCOPE_UNIVERSE }, /* RTN_UNREACHABLE */
- { -EACCES, RT_SCOPE_UNIVERSE }, /* RTN_PROHIBIT */
- { -EAGAIN, RT_SCOPE_UNIVERSE }, /* RTN_THROW */
- { -EINVAL, RT_SCOPE_NOWHERE }, /* RTN_NAT */
- { -EINVAL, RT_SCOPE_NOWHERE } /* RTN_XRESOLVE */
+ { .error = 0, .scope = RT_SCOPE_NOWHERE }, /* RTN_UNSPEC */
+ { .error = 0, .scope = RT_SCOPE_UNIVERSE }, /* RTN_UNICAST */
+ { .error = 0, .scope = RT_SCOPE_HOST }, /* RTN_LOCAL */
+ { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, /* RTN_BROADCAST */
+ { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, /* RTN_ANYCAST */
+ { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE }, /* RTN_MULTICAST */
+ { .error = -EINVAL, .scope = RT_SCOPE_UNIVERSE }, /* RTN_BLACKHOLE */
+ { .error = -EHOSTUNREACH, .scope = RT_SCOPE_UNIVERSE }, /* RTN_UNREACHABLE */
+ { .error = -EACCES, .scope = RT_SCOPE_UNIVERSE }, /* RTN_PROHIBIT */
+ { .error = -EAGAIN, .scope = RT_SCOPE_UNIVERSE }, /* RTN_THROW */
+ { .error = 0, .scope = RT_SCOPE_NOWHERE }, /* RTN_NAT */
+ { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE } /* RTN_XRESOLVE */
};
void dn_fib_free_info(struct dn_fib_info *fi)
write_unlock(&dn_fib_info_lock);
}
-static __inline__ int dn_fib_nh_comp(const struct dn_fib_info *fi, const struct dn_fib_info *ofi)
+static inline int dn_fib_nh_comp(const struct dn_fib_info *fi, const struct dn_fib_info *ofi)
{
const struct dn_fib_nh *onh = ofi->fib_nh;
return 0;
}
-static __inline__ struct dn_fib_info *dn_fib_find_info(const struct dn_fib_info *nfi)
+static inline struct dn_fib_info *dn_fib_find_info(const struct dn_fib_info *nfi)
{
for_fib_info() {
if (fi->fib_nhs != nfi->fib_nhs)
if (nfi->fib_protocol == fi->fib_protocol &&
nfi->fib_prefsrc == fi->fib_prefsrc &&
nfi->fib_priority == fi->fib_priority &&
+ memcmp(nfi->fib_metrics, fi->fib_metrics, sizeof(fi->fib_metrics)) == 0 &&
((nfi->fib_flags^fi->fib_flags)&~RTNH_F_DEAD) == 0 &&
(nfi->fib_nhs == 0 || dn_fib_nh_comp(fi, nfi) == 0))
return fi;
int err;
if (nh->nh_gw) {
- struct dn_fib_key key;
+ struct flowi fl;
struct dn_fib_res res;
+ memset(&fl, 0, sizeof(fl));
+
if (nh->nh_flags&RTNH_F_ONLINK) {
struct net_device *dev;
if (r->rtm_scope >= RT_SCOPE_LINK)
return -EINVAL;
+ if (dnet_addr_type(nh->nh_gw) != RTN_UNICAST)
+ return -EINVAL;
if ((dev = __dev_get_by_index(nh->nh_oif)) == NULL)
return -ENODEV;
if (!(dev->flags&IFF_UP))
return 0;
}
- memset(&key, 0, sizeof(key));
- key.dst = nh->nh_gw;
- key.oif = nh->nh_oif;
- key.scope = r->rtm_scope + 1;
+ memset(&fl, 0, sizeof(fl));
+ fl.fld_dst = nh->nh_gw;
+ fl.oif = nh->nh_oif;
+ fl.fld_scope = r->rtm_scope + 1;
- if (key.scope < RT_SCOPE_LINK)
- key.scope = RT_SCOPE_LINK;
+ if (fl.fld_scope < RT_SCOPE_LINK)
+ fl.fld_scope = RT_SCOPE_LINK;
- if ((err = dn_fib_lookup(&key, &res)) != 0)
+ if ((err = dn_fib_lookup(&fl, &res)) != 0)
return err;
+ err = -EINVAL;
+ if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
+ goto out;
nh->nh_scope = res.scope;
nh->nh_oif = DN_FIB_RES_OIF(res);
nh->nh_dev = DN_FIB_RES_DEV(res);
- if (nh->nh_dev)
- atomic_inc(&nh->nh_dev->refcnt);
+ if (nh->nh_dev == NULL)
+ goto out;
+ atomic_inc(&nh->nh_dev->refcnt);
+ err = -ENETDOWN;
+ if (!(nh->nh_dev->flags & IFF_UP))
+ goto out;
+ err = 0;
+out:
dn_fib_res_put(&res);
+ return err;
} else {
struct net_device *dev;
fi->fib_flags = r->rtm_flags;
if (rta->rta_priority)
fi->fib_priority = *rta->rta_priority;
+ if (rta->rta_mx) {
+ int attrlen = RTA_PAYLOAD(rta->rta_mx);
+ struct rtattr *attr = RTA_DATA(rta->rta_mx);
+
+ while(RTA_OK(attr, attrlen)) {
+ unsigned flavour = attr->rta_type;
+ if (flavour) {
+ if (flavour > RTAX_MAX)
+ goto err_inval;
+ fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr);
+ }
+ attr = RTA_NEXT(attr, attrlen);
+ }
+ }
if (rta->rta_prefsrc)
memcpy(&fi->fib_prefsrc, rta->rta_prefsrc, 2);
nh->nh_weight = 1;
}
+ if (r->rtm_type == RTN_NAT) {
+ if (rta->rta_gw == NULL || nhs != 1 || rta->rta_oif)
+ goto err_inval;
+ memcpy(&fi->fib_nh->nh_gw, rta->rta_gw, 2);
+ goto link_it;
+ }
+
if (dn_fib_props[r->rtm_type].error) {
if (rta->rta_gw || rta->rta_oif || rta->rta_mp)
goto err_inval;
} endfor_nexthops(fi)
}
-#if I_GET_AROUND_TO_FIXING_PREFSRC
if (fi->fib_prefsrc) {
if (r->rtm_type != RTN_LOCAL || rta->rta_dst == NULL ||
memcmp(&fi->fib_prefsrc, rta->rta_dst, 2))
- if (dn_addr_type(fi->fib_prefsrc) != RTN_LOCAL)
+ if (dnet_addr_type(fi->fib_prefsrc) != RTN_LOCAL)
goto err_inval;
}
-#endif
link_it:
if ((ofi = dn_fib_find_info(fi)) != NULL) {
return NULL;
}
+/*
+ * Check whether a fib_info entry of the given route type can satisfy the
+ * flow lookup. Returns 0 on a match (taking a reference on fi and
+ * recording the nexthop selection in res), a positive value when the
+ * entry is dead or no usable nexthop exists, or the negative error code
+ * associated with the route type (e.g. unreachable/prohibit) or -EINVAL
+ * for types that should never reach this point.
+ */
+int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowi *fl, struct dn_fib_res *res)
+{
+	int err = dn_fib_props[type].error;
+
+	if (err == 0) {
+		if (fi->fib_flags & RTNH_F_DEAD)
+			return 1;
+
+		res->fi = fi;
+
+		switch(type) {
+		case RTN_NAT:
+			DN_FIB_RES_RESET(*res);
+			atomic_inc(&fi->fib_clntref);
+			return 0;
+		case RTN_UNICAST:
+		case RTN_LOCAL:
+			/* Pick the first live nexthop matching the requested
+			 * output interface (or any, when oif == 0). */
+			for_nexthops(fi) {
+				if (nh->nh_flags & RTNH_F_DEAD)
+					continue;
+				if (!fl->oif || fl->oif == nh->nh_oif)
+					break;
+			}
+			if (nhsel < fi->fib_nhs) {
+				res->nh_sel = nhsel;
+				atomic_inc(&fi->fib_clntref);
+				return 0;
+			}
+			endfor_nexthops(fi);
+			res->fi = NULL;
+			return 1;
+		default:
+			if (net_ratelimit())
+				printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", type);
+			res->fi = NULL;
+			return -EINVAL;
+		}
+	}
+	return err;
+}
-void dn_fib_select_multipath(const struct dn_fib_key *key, struct dn_fib_res *res)
+void dn_fib_select_multipath(const struct flowi *fl, struct dn_fib_res *res)
{
struct dn_fib_info *fi = res->fi;
int w;
+ spin_lock_bh(&dn_fib_multipath_lock);
if (fi->fib_power <= 0) {
int power = 0;
change_nexthops(fi) {
}
} endfor_nexthops(fi);
fi->fib_power = power;
+ if (power < 0) {
+ spin_unlock_bh(&dn_fib_multipath_lock);
+ res->nh_sel = 0;
+ return;
+ }
}
w = jiffies % fi->fib_power;
nh->nh_power--;
fi->fib_power--;
res->nh_sel = nhsel;
+ spin_unlock_bh(&dn_fib_multipath_lock);
return;
}
}
} endfor_nexthops(fi);
-
- printk(KERN_DEBUG "DECnet: BUG! dn_fib_select_multipath\n");
+ res->nh_sel = 0;
+ spin_unlock_bh(&dn_fib_multipath_lock);
}
-
/*
* Punt to user via netlink for example, but for now
* we just drop it.
s_t = cb->args[0];
if (s_t == 0)
- s_t = cb->args[0] = DN_MIN_TABLE;
+ s_t = cb->args[0] = RT_MIN_TABLE;
- for(t = s_t; t < DN_NUM_TABLES; t++) {
+ for(t = s_t; t <= RT_TABLE_MAX; t++) {
if (t < s_t)
continue;
if (t > s_t)
return skb->len;
}
+/*
+ * Insert or delete a kernel-generated route (cmd is RTM_NEWROUTE or
+ * RTM_DELROUTE) by faking up a netlink request and feeding it straight
+ * into the table's insert/delete op. Unicast routes go in RT_MIN_TABLE,
+ * everything else in RT_TABLE_LOCAL.
+ */
+static void fib_magic(int cmd, int type, __u16 dst, int dst_len, struct dn_ifaddr *ifa)
+{
+ struct dn_fib_table *tb;
+ struct {
+ struct nlmsghdr nlh;
+ struct rtmsg rtm;
+ } req;
+ struct dn_kern_rta rta;
+
+ memset(&req.rtm, 0, sizeof(req.rtm));
+ memset(&rta, 0, sizeof(rta));
+
+ if (type == RTN_UNICAST)
+ tb = dn_fib_get_table(RT_MIN_TABLE, 1);
+ else
+ tb = dn_fib_get_table(RT_TABLE_LOCAL, 1);
+
+ if (tb == NULL)
+ return;
+
+ req.nlh.nlmsg_len = sizeof(req);
+ req.nlh.nlmsg_type = cmd;
+ req.nlh.nlmsg_flags = NLM_F_REQUEST|NLM_F_CREATE|NLM_F_APPEND;
+ req.nlh.nlmsg_pid = 0;
+ req.nlh.nlmsg_seq = 0;
+
+ req.rtm.rtm_dst_len = dst_len;
+ req.rtm.rtm_table = tb->n;
+ req.rtm.rtm_protocol = RTPROT_KERNEL;
+ req.rtm.rtm_scope = (type != RTN_LOCAL ? RT_SCOPE_LINK : RT_SCOPE_HOST);
+ req.rtm.rtm_type = type;
+
+ rta.rta_dst = &dst;
+ rta.rta_prefsrc = &ifa->ifa_local;
+ rta.rta_oif = &ifa->ifa_dev->dev->ifindex;
+
+ if (cmd == RTM_NEWROUTE)
+ tb->insert(tb, &req.rtm, &rta, &req.nlh, NULL);
+ else
+ tb->delete(tb, &req.rtm, &rta, &req.nlh, NULL);
+}
+
+/* Add the host (/16) local route for a newly configured interface address */
+static void dn_fib_add_ifaddr(struct dn_ifaddr *ifa)
+{
+
+ fib_magic(RTM_NEWROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa);
+
+#if 0
+ if (!(dev->flags&IFF_UP))
+ return;
+ /* In the future, we will want to add default routes here */
+
+#endif
+}
+
+/*
+ * Remove the local route for a deleted interface address, but only if no
+ * other interface still carries the same address. Must be called under
+ * the RTNL lock (asserted below).
+ */
+static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
+{
+ int found_it = 0;
+ struct net_device *dev;
+ struct dn_dev *dn_db;
+ struct dn_ifaddr *ifa2;
+
+ ASSERT_RTNL();
+
+ /* Scan device list */
+ read_lock(&dev_base_lock);
+ for(dev = dev_base; dev; dev = dev->next) {
+ dn_db = dev->dn_ptr;
+ if (dn_db == NULL)
+ continue;
+ for(ifa2 = dn_db->ifa_list; ifa2; ifa2 = ifa2->ifa_next) {
+ if (ifa2->ifa_local == ifa->ifa_local) {
+ found_it = 1;
+ break;
+ }
+ }
+ }
+ read_unlock(&dev_base_lock);
+
+ if (found_it == 0) {
+ fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa);
+
+ /* If the address is no longer local at all, purge stale routes */
+ if (dnet_addr_type(ifa->ifa_local) != RTN_LOCAL) {
+ if (dn_fib_sync_down(ifa->ifa_local, NULL, 0))
+ dn_fib_flush();
+ }
+ }
+}
+
+/* Mark all routes through "dev" dead, flush caches and its neighbours */
+static void dn_fib_disable_addr(struct net_device *dev, int force)
+{
+ if (dn_fib_sync_down(0, dev, force))
+ dn_fib_flush();
+ dn_rt_cache_flush(0);
+ neigh_ifdown(&dn_neigh_table, dev);
+}
+
+/*
+ * Notifier callback: keep the FIB in sync as DECnet addresses come and go.
+ * "ptr" is the struct dn_ifaddr being added (NETDEV_UP) or removed
+ * (NETDEV_DOWN).
+ */
+static int dn_fib_dnaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct dn_ifaddr *ifa = (struct dn_ifaddr *)ptr;
+
+ switch(event) {
+ case NETDEV_UP:
+ dn_fib_add_ifaddr(ifa);
+ dn_fib_sync_up(ifa->ifa_dev->dev);
+ dn_rt_cache_flush(-1);
+ break;
+ case NETDEV_DOWN:
+ dn_fib_del_ifaddr(ifa);
+ /* Last address on the device? Then take the whole device down */
+ if (ifa->ifa_dev && ifa->ifa_dev->ifa_list == NULL) {
+ dn_fib_disable_addr(ifa->ifa_dev->dev, 1);
+ } else {
+ dn_rt_cache_flush(-1);
+ }
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
int dn_fib_sync_down(dn_address local, struct net_device *dev, int force)
{
int ret = 0;
dead++;
else if (nh->nh_dev == dev &&
nh->nh_scope != scope) {
+ spin_lock_bh(&dn_fib_multipath_lock);
nh->nh_flags |= RTNH_F_DEAD;
fi->fib_power -= nh->nh_power;
nh->nh_power = 0;
+ spin_unlock_bh(&dn_fib_multipath_lock);
dead++;
}
} endfor_nexthops(fi)
if (nh->nh_dev != dev || dev->dn_ptr == NULL)
continue;
alive++;
+ spin_lock_bh(&dn_fib_multipath_lock);
nh->nh_power = 0;
nh->nh_flags &= ~RTNH_F_DEAD;
+ spin_unlock_bh(&dn_fib_multipath_lock);
} endfor_nexthops(fi);
- if (alive == fi->fib_nhs) {
+ if (alive > 0) {
fi->fib_flags &= ~RTNH_F_DEAD;
ret++;
}
struct dn_fib_table *tb;
int id;
- for(id = DN_NUM_TABLES; id > 0; id--) {
+ for(id = RT_TABLE_MAX; id > 0; id--) {
if ((tb = dn_fib_get_table(id, 0)) == NULL)
continue;
flushed += tb->flush(tb);
dn_rt_cache_flush(-1);
}
-int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
-{
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- switch(cmd) {
- case SIOCADDRT:
- case SIOCDELRT:
- return 0;
- }
-
- return -EINVAL;
-}
-
#ifdef CONFIG_PROC_FS
static int decnet_rt_get_info(char *buffer, char **start, off_t offset, int length)
}
- for(i = DN_MIN_TABLE; (i <= DN_NUM_TABLES) && (count > 0); i++) {
+ for(i = RT_MIN_TABLE; (i <= RT_TABLE_MAX) && (count > 0); i++) {
if ((tb = dn_fib_get_table(i, 0)) != NULL) {
int n = tb->get_info(tb, ptr, first, count);
count -= n;
}
#endif /* CONFIG_PROC_FS */
+/* Registered in dn_fib_init(); see register_dnaddr_notifier() */
+static struct notifier_block dn_fib_dnaddr_notifier = {
+ .notifier_call = dn_fib_dnaddr_event,
+};
+
void __exit dn_fib_cleanup(void)
{
proc_net_remove("decnet_route");
dn_fib_table_cleanup();
dn_fib_rules_cleanup();
+
+ /* Stop receiving DECnet address add/remove notifications */
+ unregister_dnaddr_notifier(&dn_fib_dnaddr_notifier);
}
dn_fib_table_init();
dn_fib_rules_init();
+
+ register_dnaddr_notifier(&dn_fib_dnaddr_notifier);
}
* Steve Whitehouse : Fixed neighbour states (for now anyway).
* Steve Whitehouse : Made error_report functions dummies. This
* is not the right place to return skbs.
+ * Steve Whitehouse : Convert to seq_file
*
*/
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/spinlock.h>
+#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <net/neighbour.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_neigh.h>
return -EINVAL;
}
- dn->blksize = 230;
+ /*
+ * Make an estimate of the remote block size by assuming that it is
+ * two less than the device MTU, which is true for ethernet (and
+ * other things which support long format headers) since there is
+ * an extra length field (of 16 bits) which isn't part of the
+ * ethernet headers and which the DECnet specs won't admit is part
+ * of the DECnet routing headers either.
+ *
+ * If we overestimate here, it's no big deal: the NSP negotiations
+ * will prevent us from sending packets which are too large for the
+ * remote node to handle. In any case this figure is normally updated
+ * by a hello message in most cases.
+ */
+ dn->blksize = dev->mtu - 2;
return 0;
}
* basically does a neigh_lookup(), but without comparing the device
* field. This is required for the On-Ethernet cache
*/
-struct neighbour *dn_neigh_lookup(struct neigh_table *tbl, void *ptr)
+struct neighbour *dn_neigh_lookup(struct neigh_table *tbl, const void *ptr)
{
struct neighbour *neigh;
u32 hash_val;
return 0;
}
-
-#ifdef CONFIG_DECNET_ROUTER
static char *dn_find_slot(char *base, int max, int priority)
{
int i;
return t;
}
-#endif /* CONFIG_DECNET_ROUTER */
-
#ifdef CONFIG_PROC_FS
-static int dn_neigh_get_info(char *buffer, char **start, off_t offset, int length)
+
+struct dn_neigh_iter_state {
+ int bucket;
+};
+
+static struct neighbour *neigh_get_first(struct seq_file *seq)
{
- int len = 0;
- off_t pos = 0;
- off_t begin = 0;
- struct neighbour *n;
- int i;
- char buf[DN_ASCBUF_LEN];
+ struct dn_neigh_iter_state *state = seq->private;
+ struct neighbour *n = NULL;
+
+ for(state->bucket = 0;
+ state->bucket <= NEIGH_HASHMASK;
+ ++state->bucket) {
+ n = dn_neigh_table.hash_buckets[state->bucket];
+ if (n)
+ break;
+ }
- len += sprintf(buffer + len, "Addr Flags State Use Blksize Dev\n");
-
- for(i=0;i <= NEIGH_HASHMASK; i++) {
- read_lock_bh(&dn_neigh_table.lock);
- n = dn_neigh_table.hash_buckets[i];
- for(; n != NULL; n = n->next) {
- struct dn_neigh *dn = (struct dn_neigh *)n;
-
- read_lock(&n->lock);
- len += sprintf(buffer+len, "%-7s %s%s%s %02x %02d %07ld %-8s\n",
- dn_addr2asc(dn_ntohs(dn->addr), buf),
- (dn->flags&DN_NDFLAG_R1) ? "1" : "-",
- (dn->flags&DN_NDFLAG_R2) ? "2" : "-",
- (dn->flags&DN_NDFLAG_P3) ? "3" : "-",
- dn->n.nud_state,
- atomic_read(&dn->n.refcnt),
- dn->blksize,
- (dn->n.dev) ? dn->n.dev->name : "?");
- read_unlock(&n->lock);
-
- pos = begin + len;
-
- if (pos < offset) {
- len = 0;
- begin = pos;
- }
-
- if (pos > offset + length) {
- read_unlock_bh(&dn_neigh_table.lock);
- goto done;
- }
- }
+ return n;
+}
+
+/*
+ * Advance to the next neighbour entry, walking into following hash
+ * buckets when the current chain is exhausted. Returns NULL at the end
+ * of the table. Caller holds dn_neigh_table.lock.
+ */
+static struct neighbour *neigh_get_next(struct seq_file *seq,
+ struct neighbour *n)
+{
+ struct dn_neigh_iter_state *state = seq->private;
+
+ n = n->next;
+try_again:
+ if (n)
+ goto out;
+ if (++state->bucket > NEIGH_HASHMASK)
+ goto out;
+ n = dn_neigh_table.hash_buckets[state->bucket];
+ goto try_again;
+out:
+ return n;
+}
+
+/* Return the *pos'th neighbour, or NULL if the table is shorter than that */
+static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
+{
+ struct neighbour *n = neigh_get_first(seq);
+
+ if (n)
+ while(*pos && (n = neigh_get_next(seq, n)))
+ --*pos;
+ return *pos ? NULL : n;
+}
+
+/*
+ * Locked wrapper around neigh_get_idx(). Takes the table lock and keeps
+ * it held only if an entry was found; seq_next/seq_stop drop it.
+ */
+static void *dn_neigh_get_idx(struct seq_file *seq, loff_t pos)
+{
+ void *rc;
+ read_lock_bh(&dn_neigh_table.lock);
+ rc = neigh_get_idx(seq, &pos);
+ if (!rc) {
+ read_unlock_bh(&dn_neigh_table.lock);
+ }
+ return rc;
+}
+
+/* seq_file start: (void*)1 is the sentinel for the header line at pos 0 */
+static void *dn_neigh_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? dn_neigh_get_idx(seq, *pos - 1) : (void*)1;
+}
+
+/*
+ * seq_file next: after the header sentinel, start the real iteration;
+ * otherwise advance, releasing the table lock once the walk is done.
+ */
+static void *dn_neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ void *rc;
+
+
+ if (v == (void*)1) {
+ rc = dn_neigh_get_idx(seq, 0);
+ goto out;
+ }
+
+ rc = neigh_get_next(seq, v);
+ if (rc)
+ goto out;
+ read_unlock_bh(&dn_neigh_table.lock);
+out:
+ ++*pos;
+ return rc;
+}
+
+static void dn_neigh_seq_stop(struct seq_file *seq, void *v)
+{
+ if (v && v != (void*)1)
read_unlock_bh(&dn_neigh_table.lock);
+}
+
+/* Print one neighbour as a /proc line: addr, router flags, state, refs, blksize, dev */
+static inline void dn_neigh_format_entry(struct seq_file *seq,
+ struct neighbour *n)
+{
+ struct dn_neigh *dn = (struct dn_neigh *)n;
+ char buf[DN_ASCBUF_LEN];
+
+ read_lock(&n->lock);
+ seq_printf(seq, "%-7s %s%s%s %02x %02d %07ld %-8s\n",
+ dn_addr2asc(dn_ntohs(dn->addr), buf),
+ (dn->flags&DN_NDFLAG_R1) ? "1" : "-",
+ (dn->flags&DN_NDFLAG_R2) ? "2" : "-",
+ (dn->flags&DN_NDFLAG_P3) ? "3" : "-",
+ dn->n.nud_state,
+ atomic_read(&dn->n.refcnt),
+ dn->blksize,
+ (dn->n.dev) ? dn->n.dev->name : "?");
+ read_unlock(&n->lock);
+}
+
+static int dn_neigh_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == (void*)1) {
+ seq_puts(seq, "Addr Flags State Use Blksize Dev\n");
+ } else {
+ dn_neigh_format_entry(seq, v);
}
-done:
+ return 0;
+}
+
+static struct seq_operations dn_neigh_seq_ops = {
+ .start = dn_neigh_seq_start,
+ .next = dn_neigh_seq_next,
+ .stop = dn_neigh_seq_stop,
+ .show = dn_neigh_seq_show,
+};
- *start = buffer + (offset - begin);
- len -= offset - begin;
+static int dn_neigh_seq_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ int rc = -ENOMEM;
+ struct dn_neigh_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (!s)
+ goto out;
+
+ rc = seq_open(file, &dn_neigh_seq_ops);
+ if (rc)
+ goto out_kfree;
+
+ seq = file->private_data;
+ seq->private = s;
+ memset(s, 0, sizeof(*s));
+out:
+ return rc;
+out_kfree:
+ kfree(s);
+ goto out;
+}
- if (len > length) len = length;
+static int dn_seq_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
- return len;
+ kfree(seq->private);
+ seq->private = NULL;
+ return seq_release(inode, file);
}
+static struct file_operations dn_neigh_seq_fops = {
+ .open = dn_neigh_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = dn_seq_release,
+};
+
+/* Register /proc/net/decnet_neigh; returns 0 or -ENOMEM */
+static int __init dn_neigh_proc_init(void)
+{
+ int rc = 0;
+ struct proc_dir_entry *p = create_proc_entry("decnet_neigh", S_IRUGO, proc_net);
+ if (p)
+ p->proc_fops = &dn_neigh_seq_fops;
+ else
+ rc = -ENOMEM;
+ return rc;
+}
+
+#else
+static int __init dn_neigh_proc_init(void)
+{
+ return 0;
+}
#endif
void __init dn_neigh_init(void)
{
neigh_table_init(&dn_neigh_table);
-#ifdef CONFIG_PROC_FS
- proc_net_create("decnet_neigh",0,dn_neigh_get_info);
-#endif /* CONFIG_PROC_FS */
+ dn_neigh_proc_init();
}
void __exit dn_neigh_cleanup(void)
{
- proc_net_remove("decnet_neigh");
neigh_table_clear(&dn_neigh_table);
}
sk->state_change(sk);
}
- dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
+ /*
+ * It appears that it's possible for remote machines to send disc
+ * init messages with no port identifier if we are in the CI and
+ * possibly also the CD state. Obviously we shouldn't reply with
+ * a message if we don't know what the endpoint is.
+ */
+ if (scp->addrrem) {
+ dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
+ }
scp->persist_fxn = dn_destroy_timer;
scp->persist = dn_nsp_persist(sk);
* Paul Koning: Connect Confirm message fix.
* Eduardo Serrat: Fix to stop dn_nsp_do_disc() sending malformed packets.
* Steve Whitehouse: dn_nsp_output() and friends needed a spring clean
+ * Steve Whitehouse: Moved dn_nsp_send() in here from route.h
*/
/******************************************************************************
#include <linux/if_packet.h>
#include <net/neighbour.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_nsp.h>
#include <net/dn_dev.h>
static int nsp_backoff[NSP_MAXRXTSHIFT + 1] = { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
+/*
+ * Transmit an NSP skb on the socket's cached route, performing a routing
+ * lookup first if no valid cached dst exists. On lookup failure the
+ * socket is marked unreachable and woken.
+ */
+static void dn_nsp_send(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct dn_scp *scp = DN_SK(sk);
+ struct dst_entry *dst;
+ struct flowi fl;
+
+ skb->h.raw = skb->data;
+ scp->stamp = jiffies;
+
+ dst = sk_dst_check(sk, 0);
+ if (dst) {
+try_again:
+ skb->dst = dst;
+ dst_output(skb);
+ return;
+ }
+
+ memset(&fl, 0, sizeof(fl));
+ fl.oif = sk->bound_dev_if;
+ fl.fld_src = dn_saddr2dn(&scp->addr);
+ fl.fld_dst = dn_saddr2dn(&scp->peer);
+ dn_sk_ports_copy(&fl, scp);
+ if (dn_route_output_sock(&sk->dst_cache, &fl, sk, 0) == 0) {
+ dst = sk_dst_get(sk);
+ sk->route_caps = dst->dev->features;
+ goto try_again;
+ }
+
+ sk->err = EHOSTUNREACH;
+ if (!test_bit(SOCK_DEAD, &sk->flags))
+ sk->state_change(sk);
+}
+
+
/*
* If sk == NULL, then we assume that we are supposed to be making
* a routing layer skb. If sk != NULL, then we are supposed to be
return ptr;
}
+/*
+ * Build the data-segment header on "skb": ack header plus the sequence
+ * number taken from (and advancing) the other-data or normal-data
+ * counter depending on "oth". Returns a pointer just past the header.
+ */
+static unsigned short *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth)
+{
+ struct dn_scp *scp = DN_SK(sk);
+ struct dn_skb_cb *cb = DN_SKB_CB(skb);
+ unsigned short *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth);
+
+ if (unlikely(oth)) {
+ cb->segnum = scp->numoth;
+ seq_add(&scp->numoth, 1);
+ } else {
+ cb->segnum = scp->numdat;
+ seq_add(&scp->numdat, 1);
+ }
+ *(ptr++) = dn_htons(cb->segnum);
+
+ return ptr;
+}
+
void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oth)
{
struct dn_scp *scp = DN_SK(sk);
struct dn_skb_cb *cb = DN_SKB_CB(skb);
unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;
+ cb->xmit_count = 0;
+ dn_nsp_mk_data_header(sk, skb, oth);
+
/*
* Slow start: If we have been idle for more than
* one RTT, then reset window to min size.
if ((jiffies - scp->stamp) > t)
scp->snd_window = NSP_MIN_WINDOW;
- /* printk(KERN_DEBUG "Window: %lu\n", scp->snd_window); */
-
- cb->xmit_count = 0;
-
if (oth)
skb_queue_tail(&scp->other_xmit_queue, skb);
else
{
struct dn_scp *scp = DN_SK(sk);
struct sk_buff *skb;
- unsigned short *segnum;
unsigned char *ptr;
int gfp = GFP_ATOMIC;
- if ((skb = dn_alloc_skb(sk, 13, gfp)) == NULL)
+ if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
return;
- skb_reserve(skb, 13);
- segnum = dn_mk_ack_header(sk, skb, 0x10, 13, 1);
- *segnum = dn_htons(scp->numoth);
- DN_SKB_CB(skb)->segnum = scp->numoth;
- seq_add(&scp->numoth, 1);
- ptr = (unsigned char *)(segnum + 1);
+ skb_reserve(skb, DN_MAX_NSP_DATA_HEADER);
+ ptr = skb_put(skb, 2);
+ DN_SKB_CB(skb)->nsp_flags = 0x10;
*ptr++ = lsflags;
*ptr = fcval;
* backlog congestion level return codes.
* Steve Whitehouse : Fixed bug where routes were set up with
* no ref count on net devices.
- *
+ * Steve Whitehouse : RCU for the route cache
+ * Steve Whitehouse : Preparations for the flow cache
*/
/******************************************************************************
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
+#include <linux/in_route.h>
#include <net/sock.h>
-#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
+#include <linux/rcupdate.h>
#include <asm/errno.h>
#include <net/neighbour.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
struct dn_rt_hash_bucket
{
struct dn_route *chain;
- rwlock_t lock;
+ spinlock_t lock;
} __attribute__((__aligned__(8)));
extern struct neigh_table dn_neigh_table;
return dn_rt_hash_mask & (unsigned)tmp;
}
+/* Free a cached route after the current RCU grace period ends */
+static inline void dnrt_free(struct dn_route *rt)
+{
+ call_rcu(&rt->u.dst.rcu_head, (void (*)(void *))dst_free, &rt->u.dst);
+}
+
+/*
+ * Release a reference on a cached route and schedule it for RCU-deferred
+ * freeing. Bail out early on NULL: the old code NULL-checked rt only for
+ * dst_release() yet still computed &rt->u.dst for call_rcu(), which would
+ * later have dst_free() called on a bogus pointer.
+ */
+static inline void dnrt_drop(struct dn_route *rt)
+{
+ if (rt == NULL)
+ return;
+ dst_release(&rt->u.dst);
+ call_rcu(&rt->u.dst.rcu_head, (void (*)(void *))dst_free, &rt->u.dst);
+}
+
static void SMP_TIMER_NAME(dn_dst_check_expire)(unsigned long dummy)
{
int i;
for(i = 0; i <= dn_rt_hash_mask; i++) {
rtp = &dn_rt_hash_table[i].chain;
- write_lock(&dn_rt_hash_table[i].lock);
+ spin_lock(&dn_rt_hash_table[i].lock);
while((rt=*rtp) != NULL) {
if (atomic_read(&rt->u.dst.__refcnt) ||
(now - rt->u.dst.lastuse) < expire) {
}
*rtp = rt->u.rt_next;
rt->u.rt_next = NULL;
- dst_free(&rt->u.dst);
+ dnrt_free(rt);
}
- write_unlock(&dn_rt_hash_table[i].lock);
+ spin_unlock(&dn_rt_hash_table[i].lock);
if ((jiffies - now) > 0)
break;
for(i = 0; i <= dn_rt_hash_mask; i++) {
- write_lock_bh(&dn_rt_hash_table[i].lock);
+ spin_lock_bh(&dn_rt_hash_table[i].lock);
rtp = &dn_rt_hash_table[i].chain;
while((rt=*rtp) != NULL) {
}
*rtp = rt->u.rt_next;
rt->u.rt_next = NULL;
- dst_free(&rt->u.dst);
+ dnrt_drop(rt);
break;
}
- write_unlock_bh(&dn_rt_hash_table[i].lock);
+ spin_unlock_bh(&dn_rt_hash_table[i].lock);
}
return 0;
return;
}
-static void dn_insert_route(struct dn_route *rt, unsigned hash)
+/* True when two lookup keys (DECnet addresses/fwmark plus iif/oif) match */
+static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
+ return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 &&
+ fl1->oif == fl2->oif &&
+ fl1->iif == fl2->iif;
+}
+
+/*
+ * Insert "rt" into the route cache bucket "hash". If an entry with the
+ * same key already exists it is moved to the front and returned through
+ * *rp instead (and "rt" is dropped). Always returns 0; *rp holds a
+ * counted reference either way.
+ */
+static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
+{
+ struct dn_route *rth, **rthp;
unsigned long now = jiffies;
- write_lock_bh(&dn_rt_hash_table[hash].lock);
+ rthp = &dn_rt_hash_table[hash].chain;
+
+ spin_lock_bh(&dn_rt_hash_table[hash].lock);
+ while((rth = *rthp) != NULL) {
+ if (compare_keys(&rth->fl, &rt->fl)) {
+ /* Put it first */
+ *rthp = rth->u.rt_next;
+ smp_wmb();
+ rth->u.rt_next = dn_rt_hash_table[hash].chain;
+ smp_wmb();
+ dn_rt_hash_table[hash].chain = rth;
+
+ rth->u.dst.__use++;
+ dst_hold(&rth->u.dst);
+ rth->u.dst.lastuse = now;
+ spin_unlock_bh(&dn_rt_hash_table[hash].lock);
+
+ dnrt_drop(rt);
+ *rp = rth;
+ return 0;
+ }
+ rthp = &rth->u.rt_next;
+ }
+
+ smp_wmb();
rt->u.rt_next = dn_rt_hash_table[hash].chain;
+ smp_wmb();
dn_rt_hash_table[hash].chain = rt;
dst_hold(&rt->u.dst);
rt->u.dst.__use++;
rt->u.dst.lastuse = now;
-
- write_unlock_bh(&dn_rt_hash_table[hash].lock);
+ spin_unlock_bh(&dn_rt_hash_table[hash].lock);
+ *rp = rt;
+ return 0;
}
void SMP_TIMER_NAME(dn_run_flush)(unsigned long dummy)
struct dn_route *rt, *next;
for(i = 0; i < dn_rt_hash_mask; i++) {
- write_lock_bh(&dn_rt_hash_table[i].lock);
+ spin_lock_bh(&dn_rt_hash_table[i].lock);
if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
goto nothing_to_declare;
}
nothing_to_declare:
- write_unlock_bh(&dn_rt_hash_table[i].lock);
+ spin_unlock_bh(&dn_rt_hash_table[i].lock);
}
}
return err;
}
-#ifdef CONFIG_DECNET_ROUTER
static int dn_forward(struct sk_buff *skb)
{
struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct dst_entry *dst = skb->dst;
- struct neighbour *neigh;
+ struct dn_dev *dn_db = dst->dev->dn_ptr;
+ struct dn_route *rt;
+ struct neighbour *neigh = dst->neighbour;
+ int header_len;
#ifdef CONFIG_NETFILTER
struct net_device *dev = skb->dev;
#endif
- int err = -EINVAL;
- if ((neigh = dst->neighbour) == NULL)
- goto error;
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
+ /* Ensure that we have enough space for headers */
+ rt = (struct dn_route *)skb->dst;
+ header_len = dn_db->use_long ? 21 : 6;
+ if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len))
+ goto drop;
/*
* Hop count exceeded.
*/
- err = NET_RX_DROP;
if (++cb->hops > 30)
goto drop;
- skb->dev = dst->dev;
+ skb->dev = rt->u.dst.dev;
/*
* If packet goes out same interface it came in on, then set
* the Intra-Ethernet bit. This has no effect for short
* packets, so we don't need to test for them here.
*/
- if (cb->iif == dst->dev->ifindex)
+ cb->rt_flags &= ~DN_RT_F_IE;
+ if (rt->rt_flags | RTCF_DOREDIRECT)
cb->rt_flags |= DN_RT_F_IE;
- else
- cb->rt_flags &= ~DN_RT_F_IE;
return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output);
-
-error:
- if (net_ratelimit())
- printk(KERN_DEBUG "dn_forward: This should not happen\n");
drop:
kfree_skb(skb);
-
- return err;
+ return NET_RX_DROP;
}
-#endif
/*
* Drop packet. This is used for endnodes and for
return NET_RX_BAD;
}
-static int dn_route_output_slow(struct dst_entry **pprt, dn_address dst, dn_address src, int flags)
+/*
+ * Fill in the nexthop details of a freshly built route: gateway and
+ * metrics from the FIB result, a neighbour entry if none is bound yet,
+ * and MTU/advertised-MSS metrics clamped to the output device.
+ * Returns 0 or a negative errno from the neighbour lookup.
+ */
+static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
+{
+ struct dn_fib_info *fi = res->fi;
+ struct net_device *dev = rt->u.dst.dev;
+ struct neighbour *n;
+ unsigned mss;
+
+ if (fi) {
+ if (DN_FIB_RES_GW(*res) &&
+ DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
+ rt->rt_gateway = DN_FIB_RES_GW(*res);
+ memcpy(rt->u.dst.metrics, fi->fib_metrics,
+ sizeof(rt->u.dst.metrics));
+ }
+ rt->rt_type = res->type;
+
+ if (dev != NULL && rt->u.dst.neighbour == NULL) {
+ n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ rt->u.dst.neighbour = n;
+ }
+
+ if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
+ rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
+ rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
+ mss = dn_mss_from_pmtu(dev, dst_pmtu(&rt->u.dst));
+ if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
+ rt->u.dst.metrics[RTAX_ADVMSS-1] > mss)
+ rt->u.dst.metrics[RTAX_ADVMSS-1] = mss;
+ return 0;
+}
+
+/*
+ * Number of leading bits (0..16) the two addresses have in common:
+ * XOR them and count down from 16 once per remaining significant bit.
+ */
+static inline int dn_match_addr(__u16 addr1, __u16 addr2)
+{
+ __u16 tmp = dn_ntohs(addr1) ^ dn_ntohs(addr2);
+ int match = 16;
+ while(tmp)
+ tmp >>= 1, match--;
+ return match;
+}
+
+/*
+ * Choose the source address on "dev" whose leading bits best match
+ * "daddr" (any in-scope address when daddr is 0). Falls back to the
+ * last in-scope address if nothing matches at all. Returns 0 when the
+ * device has no suitable address.
+ *
+ * Fix: best_match was never updated, so "ret > best_match" degenerated
+ * to "ret > 0" and the last partially-matching address won rather than
+ * the best one; record the new best on each improvement.
+ */
+static __u16 dnet_select_source(const struct net_device *dev, __u16 daddr, int scope)
+{
+ __u16 saddr = 0;
+ struct dn_dev *dn_db = dev->dn_ptr;
+ struct dn_ifaddr *ifa;
+ int best_match = 0;
+ int ret;
+
+ read_lock(&dev_base_lock);
+ for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
+ if (ifa->ifa_scope > scope)
+ continue;
+ if (!daddr) {
+ saddr = ifa->ifa_local;
+ break;
+ }
+ ret = dn_match_addr(daddr, ifa->ifa_local);
+ if (ret > best_match) {
+ best_match = ret;
+ saddr = ifa->ifa_local;
+ }
+ if (best_match == 0)
+ saddr = ifa->ifa_local;
+ }
+ read_unlock(&dev_base_lock);
+
+ return saddr;
+}
+
+/* Preferred source for a FIB result: best local match for the gateway */
+static inline __u16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
+{
+ return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
+}
+
+/* NAT mapping: keep the host bits of daddr, substitute the nexthop's prefix */
+static inline __u16 dn_fib_rules_map_destination(__u16 daddr, struct dn_fib_res *res)
+{
+ __u16 mask = dnet_make_mask(res->prefixlen);
+ return (daddr&~mask)|res->fi->fib_nh->nh_gw;
+}
+
+static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
+{
+ struct flowi fl = { .nl_u = { .dn_u =
+ { .daddr = oldflp->fld_dst,
+ .saddr = oldflp->fld_src,
+ .scope = RT_SCOPE_UNIVERSE,
+#ifdef CONFIG_DECNET_ROUTE_FWMARK
+ .fwmark = oldflp->fld_fwmark
+#endif
+ } },
+ .iif = loopback_dev.ifindex,
+ .oif = oldflp->oif };
struct dn_route *rt = NULL;
- struct net_device *dev = NULL;
+ struct net_device *dev_out = NULL;
struct neighbour *neigh = NULL;
- struct dn_dev *dn_db;
unsigned hash;
-
-#ifdef CONFIG_DECNET_ROUTER
- struct dn_fib_key key;
- struct dn_fib_res res;
+ unsigned flags = 0;
+ struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
int err;
+ int free_res = 0;
+ __u16 gateway = 0;
- key.src = src;
- key.dst = dst;
- key.iif = 0;
- key.oif = 0;
- key.fwmark = 0;
- key.scope = RT_SCOPE_UNIVERSE;
-
- if ((err = dn_fib_lookup(&key, &res)) == 0) {
- switch(res.type) {
- case RTN_UNICAST:
- /*
- * This method of handling multipath
- * routes is a hack and will change.
- * It works for now though.
- */
- if (res.fi->fib_nhs)
- dn_fib_select_multipath(&key, &res);
- neigh = __neigh_lookup(&dn_neigh_table, &DN_FIB_RES_GW(res), DN_FIB_RES_DEV(res), 1);
- err = -ENOBUFS;
- if (!neigh)
- break;
- err = 0;
- break;
- case RTN_UNREACHABLE:
- err = -EHOSTUNREACH;
- break;
- default:
- err = -EINVAL;
+ if (decnet_debug_level & 16)
+ printk(KERN_DEBUG
+ "dn_route_output_slow: dst=%04x src=%04x mark=%d"
+ " iif=%d oif=%d\n", oldflp->fld_dst, oldflp->fld_src,
+ oldflp->fld_fwmark, loopback_dev.ifindex, oldflp->oif);
+
+ /* If we have an output interface, verify its a DECnet device */
+ if (oldflp->oif) {
+ dev_out = dev_get_by_index(oldflp->oif);
+ err = -ENODEV;
+ if (dev_out && dev_out->dn_ptr == NULL) {
+ dev_put(dev_out);
+ dev_out = NULL;
}
- dn_fib_res_put(&res);
- if (err < 0)
- return err;
- goto got_route;
+ if (dev_out == NULL)
+ goto out;
}
- if (err != -ESRCH)
- return err;
-#endif
+ /* If we have a source address, verify that its a local address */
+ if (oldflp->fld_src) {
+ err = -EADDRNOTAVAIL;
- /* Look in On-Ethernet cache first */
- if (!(flags & MSG_TRYHARD)) {
- if ((neigh = dn_neigh_lookup(&dn_neigh_table, &dst)) != NULL)
- goto got_route;
+ if (dev_out) {
+ if (dn_dev_islocal(dev_out, oldflp->fld_src))
+ goto source_ok;
+ dev_put(dev_out);
+ goto out;
+ }
+ read_lock(&dev_base_lock);
+ for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
+ if (!dev_out->dn_ptr)
+ continue;
+ if (dn_dev_islocal(dev_out, oldflp->fld_src))
+ break;
+ }
+ read_unlock(&dev_base_lock);
+ if (dev_out == NULL)
+ goto out;
+ dev_hold(dev_out);
+source_ok:
}
- dev = dn_dev_get_default();
- if (dev == NULL)
- return -EINVAL;
+ /* No destination? Assume its local */
+ if (!fl.fld_dst) {
+ fl.fld_dst = fl.fld_src;
+#if 0
+ if (!fl.fld_dst)
+ /* grab an address from loopback? */
+#endif
+ err = -EADDRNOTAVAIL;
+ if (dev_out)
+ dev_put(dev_out);
+ if (!fl.fld_dst)
+ goto out;
+ dev_out = &loopback_dev;
+ dev_hold(dev_out);
+ fl.oif = loopback_dev.ifindex;
+ res.type = RTN_LOCAL;
+ goto make_route;
+ }
- dn_db = dev->dn_ptr;
+ if (decnet_debug_level & 16)
+ printk(KERN_DEBUG
+ "dn_route_output_slow: initial checks complete."
+ " dst=%o4x src=%04x oif=%d try_hard=%d\n", fl.fld_dst,
+ fl.fld_src, fl.oif, try_hard);
- /* Check to see if its one of our own local addresses */
- if (dn_dev_islocal(dev, dst)) {
- struct net_device *lo = &loopback_dev;
- if (lo->dn_ptr) {
- neigh = __neigh_lookup(&dn_neigh_table, &dst, lo, 1);
- if (neigh)
- goto got_route;
+ /*
+ * N.B. If the kernel is compiled without router support then
+ * dn_fib_lookup() will evaluate to non-zero so this if () block
+ * will always be executed.
+ */
+ err = -ESRCH;
+ if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
+ struct dn_dev *dn_db;
+ if (err != -ESRCH)
+ goto out;
+ /*
+ * Here the fallback is basically the standard algorithm for
+ * routing in endnodes which is described in the DECnet routing
+ * docs
+ *
+ * If we are not trying hard, look in neighbour cache.
+ * The result is tested to ensure that if a specific output
+ * device/source address was requested, then we honour that
+ * here
+ */
+ if (!try_hard) {
+ neigh = dn_neigh_lookup(&dn_neigh_table, &fl.fld_dst);
+ if (neigh) {
+ if ((oldflp->oif &&
+ (neigh->dev->ifindex != oldflp->oif)) ||
+ (oldflp->fld_src &&
+ (!dn_dev_islocal(neigh->dev,
+ oldflp->fld_src)))) {
+ neigh_release(neigh);
+ neigh = NULL;
+ } else {
+ if (dev_out)
+ dev_put(dev_out);
+ if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
+ dev_out = &loopback_dev;
+ res.type = RTN_LOCAL;
+ } else {
+ dev_out = neigh->dev;
+ }
+ dev_hold(dev_out);
+ goto select_source;
+ }
+ }
}
- if (net_ratelimit())
- printk("dn_route_output_slow: Dest is local interface address, but loopback device is not up\n");
- dev_put(dev);
- return -EINVAL;
- }
- /* Try default router */
- if ((neigh = neigh_clone(dn_db->router)) != NULL)
- goto got_route;
+ /* Not there? Perhaps its a local address */
+ if (dev_out == NULL)
+ dev_out = dn_dev_get_default();
+ err = -ENODEV;
+ if (dev_out == NULL)
+ goto out;
+ dn_db = dev_out->dn_ptr;
+ /* Possible improvement - check all devices for local addr */
+ if (dn_dev_islocal(dev_out, fl.fld_dst)) {
+ dev_put(dev_out);
+ dev_out = &loopback_dev;
+ dev_hold(dev_out);
+ res.type = RTN_LOCAL;
+ goto select_source;
+ }
+ /* Not local either.... try sending it to the default router */
+ neigh = neigh_clone(dn_db->router);
+ BUG_ON(neigh && neigh->dev != dev_out);
+
+ /* Ok then, we assume its directly connected and move on */
+select_source:
+ if (neigh)
+ gateway = ((struct dn_neigh *)neigh)->addr;
+ if (gateway == 0)
+ gateway = fl.fld_dst;
+ if (fl.fld_src == 0) {
+ fl.fld_src = dnet_select_source(dev_out, gateway,
+ res.type == RTN_LOCAL ?
+ RT_SCOPE_HOST :
+ RT_SCOPE_LINK);
+ if (fl.fld_src == 0 && res.type != RTN_LOCAL)
+ goto e_addr;
+ }
+ fl.oif = dev_out->ifindex;
+ goto make_route;
+ }
+ free_res = 1;
+
+ if (res.type == RTN_NAT)
+ goto e_inval;
+
+ if (res.type == RTN_LOCAL) {
+ if (!fl.fld_src)
+ fl.fld_src = fl.fld_dst;
+ if (dev_out)
+ dev_put(dev_out);
+ dev_out = &loopback_dev;
+ dev_hold(dev_out);
+ fl.oif = dev_out->ifindex;
+ if (res.fi)
+ dn_fib_info_put(res.fi);
+ res.fi = NULL;
+ goto make_route;
+ }
- /* Send to default device (and hope for the best) if above fail */
- if ((neigh = __neigh_lookup(&dn_neigh_table, &dst, dev, 1)) != NULL)
- goto got_route;
+ if (res.fi->fib_nhs > 1 && fl.oif == 0)
+ dn_fib_select_multipath(&fl, &res);
- dev_put(dev);
- return -EINVAL;
+ /*
+ * We could add some logic to deal with default routes here and
+ * get rid of some of the special casing above.
+ */
-got_route:
- if (dev)
- dev_put(dev);
+ if (!fl.fld_src)
+ fl.fld_src = DN_FIB_RES_PREFSRC(res);
+
+ if (dev_out)
+ dev_put(dev_out);
+ dev_out = DN_FIB_RES_DEV(res);
+ dev_hold(dev_out);
+ fl.oif = dev_out->ifindex;
+ gateway = DN_FIB_RES_GW(res);
+
+make_route:
+ if (dev_out->flags & IFF_LOOPBACK)
+ flags |= RTCF_LOCAL;
+
+ rt = dst_alloc(&dn_dst_ops);
+ if (rt == NULL)
+ goto e_nobufs;
+
+ atomic_set(&rt->u.dst.__refcnt, 1);
+ rt->u.dst.flags = DST_HOST;
+
+ rt->fl.fld_src = oldflp->fld_src;
+ rt->fl.fld_dst = oldflp->fld_dst;
+ rt->fl.oif = oldflp->oif;
+ rt->fl.iif = 0;
+#ifdef CONFIG_DECNET_ROUTE_FWMARK
+ rt->fl.fld_fwmark = flp->fld_fwmark;
+#endif
- if ((rt = dst_alloc(&dn_dst_ops)) == NULL) {
- neigh_release(neigh);
- return -EINVAL;
- }
+ rt->rt_saddr = fl.fld_src;
+ rt->rt_daddr = fl.fld_dst;
+ rt->rt_gateway = gateway ? gateway : fl.fld_dst;
- dn_db = (struct dn_dev *)neigh->dev->dn_ptr;
-
- rt->key.saddr = src;
- rt->rt_saddr = src;
- rt->key.daddr = dst;
- rt->rt_daddr = dst;
- rt->key.oif = neigh ? neigh->dev->ifindex : -1;
- rt->key.iif = 0;
- rt->key.fwmark = 0;
+ rt->rt_dst_map = fl.fld_dst;
+ rt->rt_src_map = fl.fld_src;
+ rt->u.dst.dev = dev_out;
+ dev_hold(dev_out);
rt->u.dst.neighbour = neigh;
- rt->u.dst.dev = neigh ? neigh->dev : NULL;
- if (rt->u.dst.dev)
- dev_hold(rt->u.dst.dev);
+ neigh = NULL;
+
rt->u.dst.lastuse = jiffies;
- rt->u.dst.output = dn_output;
- rt->u.dst.input = dn_rt_bug;
+ rt->u.dst.output = dn_output;
+ rt->u.dst.input = dn_rt_bug;
+ rt->rt_flags = flags;
+ if (flags & RTCF_LOCAL)
+ rt->u.dst.output = dn_nsp_rx;
- if (neigh->dev->flags & IFF_LOOPBACK)
- rt->u.dst.input = dn_nsp_rx;
+ if (dn_rt_set_next_hop(rt, &res))
+ goto e_neighbour;
- hash = dn_hash(rt->key.saddr, rt->key.daddr);
- dn_insert_route(rt, hash);
- *pprt = &rt->u.dst;
+ hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
+ dn_insert_route(rt, hash, (struct dn_route **)pprt);
+ err = 0;
- return 0;
+done:
+ if (neigh)
+ neigh_release(neigh);
+ if (free_res)
+ dn_fib_res_put(&res);
+ if (dev_out)
+ dev_put(dev_out);
+out:
+ return err;
+
+e_addr:
+ err = -EADDRNOTAVAIL;
+ goto done;
+e_inval:
+ err = -EINVAL;
+ goto done;
+e_nobufs:
+ err = -ENOBUFS;
+ goto done;
+e_neighbour:
+ dst_free(&rt->u.dst);
+ goto e_nobufs;
}
-int dn_route_output(struct dst_entry **pprt, dn_address dst, dn_address src, int flags)
+
+/*
+ * N.B. The flags may be moved into the flowi at some future stage.
+ */
+/*
+ * Output-route lookup: check the RCU-protected route cache first (unless
+ * MSG_TRYHARD forces a fresh lookup) and fall back to the slow path.
+ */
+static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
{
- unsigned hash = dn_hash(src, dst);
+ unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
struct dn_route *rt = NULL;
if (!(flags & MSG_TRYHARD)) {
- read_lock_bh(&dn_rt_hash_table[hash].lock);
+ rcu_read_lock();
for(rt = dn_rt_hash_table[hash].chain; rt; rt = rt->u.rt_next) {
- if ((dst == rt->key.daddr) &&
- (src == rt->key.saddr) &&
- (rt->key.iif == 0) &&
- (rt->key.oif != 0)) {
+ read_barrier_depends();
+ if ((flp->fld_dst == rt->fl.fld_dst) &&
+ (flp->fld_src == rt->fl.fld_src) &&
+#ifdef CONFIG_DECNET_ROUTE_FWMARK
+ (flp->fld_fwmark == rt->fl.fld_fwmark) &&
+#endif
+ (rt->fl.iif == 0) &&
+ (rt->fl.oif == flp->oif)) {
rt->u.dst.lastuse = jiffies;
dst_hold(&rt->u.dst);
rt->u.dst.__use++;
- read_unlock_bh(&dn_rt_hash_table[hash].lock);
+ rcu_read_unlock();
*pprt = &rt->u.dst;
return 0;
}
}
- read_unlock_bh(&dn_rt_hash_table[hash].lock);
+ rcu_read_unlock();
}
- return dn_route_output_slow(pprt, dst, src, flags);
+ return dn_route_output_slow(pprt, flp, flags);
+}
+
+static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
+{
+ int err;
+
+ err = __dn_route_output_key(pprt, flp, flags);
+ if (err == 0 && flp->proto) {
+ err = xfrm_lookup(pprt, flp, NULL, 0);
+ }
+ return err;
+}
+
+int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
+{
+ int err;
+
+ err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
+ if (err == 0 && fl->proto) {
+ err = xfrm_lookup(pprt, fl, sk, !(flags & MSG_DONTWAIT));
+ }
+ return err;
}
static int dn_route_input_slow(struct sk_buff *skb)
{
struct dn_route *rt = NULL;
struct dn_skb_cb *cb = DN_SKB_CB(skb);
- struct net_device *dev = skb->dev;
+ struct net_device *in_dev = skb->dev;
+ struct net_device *out_dev = NULL;
struct dn_dev *dn_db;
struct neighbour *neigh = NULL;
- int (*dnrt_input)(struct sk_buff *skb);
- int (*dnrt_output)(struct sk_buff *skb);
- u32 fwmark = 0;
unsigned hash;
- dn_address saddr = cb->src;
- dn_address daddr = cb->dst;
-#ifdef CONFIG_DECNET_ROUTER
- struct dn_fib_key key;
- struct dn_fib_res res;
- int err;
+ int flags = 0;
+ __u16 gateway = 0;
+ struct flowi fl = { .nl_u = { .dn_u =
+ { .daddr = cb->dst,
+ .saddr = cb->src,
+ .scope = RT_SCOPE_UNIVERSE,
+#ifdef CONFIG_DECNET_ROUTE_FWMARK
+ .fwmark = skb->nfmark
#endif
+ } },
+ .iif = skb->dev->ifindex };
+ struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
+ int err = -EINVAL;
+ int free_res = 0;
- if (dev == NULL)
- return -EINVAL;
+ dev_hold(in_dev);
- if ((dn_db = dev->dn_ptr) == NULL)
- return -EINVAL;
+ if ((dn_db = in_dev->dn_ptr) == NULL)
+ goto out;
+
+ /* Zero source addresses are not allowed */
+ if (fl.fld_src == 0)
+ goto out;
/*
* In this case we've just received a packet from a source
* other nasties. Loopback packets already have the dst attached
* so this only affects packets which have originated elsewhere.
*/
- if (dn_dev_islocal(dev, cb->src))
- return -ENOTUNIQ;
-
- /*
- * Default is to create a drop everything entry
- */
- dnrt_input = dn_blackhole;
- dnrt_output = dn_rt_bug;
-
- /*
- * Is the destination us ?
- */
- if (!dn_dev_islocal(dev, cb->dst))
- goto non_local_input;
+ err = -ENOTUNIQ;
+ if (dn_dev_islocal(in_dev, cb->src))
+ goto out;
- /*
- * Local input... find source of skb
- */
- dnrt_input = dn_nsp_rx;
- dnrt_output = dn_output;
- saddr = cb->dst;
- daddr = cb->src;
-
- if ((neigh = neigh_lookup(&dn_neigh_table, &cb->src, dev)) != NULL)
- goto add_entry;
-
- if (dn_db->router && ((neigh = neigh_clone(dn_db->router)) != NULL))
- goto add_entry;
-
- neigh = neigh_create(&dn_neigh_table, &cb->src, dev);
- if (!IS_ERR(neigh)) {
- if (dev->type == ARPHRD_ETHER)
- memcpy(neigh->ha, skb->mac.ethernet->h_source, ETH_ALEN);
- goto add_entry;
+ err = dn_fib_lookup(&fl, &res);
+ if (err) {
+ if (err != -ESRCH)
+ goto out;
+ /*
+ * Is the destination us ?
+ */
+ if (!dn_dev_islocal(in_dev, cb->dst))
+ goto e_inval;
+
+ res.type = RTN_LOCAL;
+ flags |= RTCF_DIRECTSRC;
+ } else {
+ __u16 src_map = fl.fld_src;
+ free_res = 1;
+
+ out_dev = DN_FIB_RES_DEV(res);
+ if (out_dev == NULL) {
+ if (net_ratelimit())
+ printk(KERN_CRIT "Bug in dn_route_input_slow() "
+ "No output device\n");
+ goto e_inval;
+ }
+ dev_hold(out_dev);
+
+ if (res.r)
+ src_map = dn_fib_rules_policy(fl.fld_src, &res, &flags);
+
+ gateway = DN_FIB_RES_GW(res);
+ if (res.type == RTN_NAT) {
+ fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
+ dn_fib_res_put(&res);
+ free_res = 0;
+ if (dn_fib_lookup(&fl, &res))
+ goto e_inval;
+ free_res = 1;
+ if (res.type != RTN_UNICAST)
+ goto e_inval;
+ flags |= RTCF_DNAT;
+ gateway = fl.fld_dst;
+ }
+ fl.fld_src = src_map;
}
- return PTR_ERR(neigh);
-
-non_local_input:
-
-#ifdef CONFIG_DECNET_ROUTER
- /*
- * Destination is another node... find next hop in
- * routing table here.
- */
-
- key.src = cb->src;
- key.dst = cb->dst;
- key.iif = dev->ifindex;
- key.oif = 0;
- key.scope = RT_SCOPE_UNIVERSE;
-
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
- key.fwmark = skb->nfmark;
-#else
- key.fwmark = 0;
-#endif
+ switch(res.type) {
+ case RTN_UNICAST:
+ /*
+ * Forwarding check here, we only check for forwarding
+ * being turned off, if you want to only forward intra
+ * area, its up to you to set the routing tables up
+ * correctly.
+ */
+ if (dn_db->parms.forwarding == 0)
+ goto e_inval;
+
+ if (res.fi->fib_nhs > 1 && fl.oif == 0)
+ dn_fib_select_multipath(&fl, &res);
+
+ /*
+ * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
+ * flag as a hint to set the intra-ethernet bit when
+ * forwarding. If we've got NAT in operation, we don't do
+ * this optimisation.
+ */
+ if (out_dev == in_dev && !(flags & RTCF_NAT))
+ flags |= RTCF_DOREDIRECT;
+
+ case RTN_BLACKHOLE:
+ case RTN_UNREACHABLE:
+ break;
+ case RTN_LOCAL:
+ flags |= RTCF_LOCAL;
+ fl.fld_src = cb->dst;
+ fl.fld_dst = cb->src;
+
+ /* Routing tables gave us a gateway */
+ if (gateway)
+ goto make_route;
+
+ /* Packet was intra-ethernet, so we know its on-link */
+ if (cb->rt_flags & DN_RT_F_IE) {
+ gateway = cb->src;
+ flags |= RTCF_DIRECTSRC;
+ goto make_route;
+ }
- if ((err = dn_fib_lookup(&key, &res)) == 0) {
- switch(res.type) {
- case RTN_UNICAST:
- if (res.fi->fib_nhs)
- dn_fib_select_multipath(&key, &res);
- neigh = __neigh_lookup(&dn_neigh_table, &DN_FIB_RES_GW(res), DN_FIB_RES_DEV(res), 1);
- err = -ENOBUFS;
- if (!neigh)
- break;
- err = 0;
- dnrt_input = dn_forward;
- fwmark = key.fwmark;
- break;
- case RTN_UNREACHABLE:
- dnrt_input = dn_blackhole;
- fwmark = key.fwmark;
- break;
- default:
- err = -EINVAL;
+ /* Use the default router if there is one */
+ neigh = neigh_clone(dn_db->router);
+ if (neigh) {
+ gateway = ((struct dn_neigh *)neigh)->addr;
+ goto make_route;
}
- dn_fib_res_put(&res);
- if (err < 0)
- return err;
- goto add_entry;
- }
- return err;
+ /* Close eyes and pray */
+ gateway = cb->src;
+ flags |= RTCF_DIRECTSRC;
+ goto make_route;
+ default:
+ goto e_inval;
+ }
-#endif /* CONFIG_DECNET_ROUTER */
+make_route:
+ rt = dst_alloc(&dn_dst_ops);
+ if (rt == NULL)
+ goto e_nobufs;
+
+ rt->rt_saddr = fl.fld_src;
+ rt->rt_daddr = fl.fld_dst;
+ rt->rt_gateway = fl.fld_dst;
+ if (gateway)
+ rt->rt_gateway = gateway;
+ rt->rt_dst_map = fl.fld_dst;
+ rt->rt_src_map = fl.fld_src;
+
+ rt->fl.fld_src = cb->src;
+ rt->fl.fld_dst = cb->dst;
+ rt->fl.oif = 0;
+ rt->fl.iif = in_dev->ifindex;
+ rt->fl.fld_fwmark = fl.fld_fwmark;
+
+ rt->u.dst.flags = DST_HOST;
+ rt->u.dst.neighbour = neigh;
+ rt->u.dst.dev = out_dev;
+ rt->u.dst.lastuse = jiffies;
+ rt->u.dst.output = dn_rt_bug;
+ switch(res.type) {
+ case RTN_UNICAST:
+ rt->u.dst.input = dn_forward;
+ break;
+ case RTN_LOCAL:
+ rt->u.dst.output = dn_output;
+ rt->u.dst.input = dn_nsp_rx;
+ rt->u.dst.dev = in_dev;
+ flags |= RTCF_LOCAL;
+ break;
+ default:
+ case RTN_UNREACHABLE:
+ case RTN_BLACKHOLE:
+ rt->u.dst.input = dn_blackhole;
+ }
+ rt->rt_flags = flags;
+ if (rt->u.dst.dev)
+ dev_hold(rt->u.dst.dev);
-add_entry:
+ if (dn_rt_set_next_hop(rt, &res))
+ goto e_neighbour;
- if ((rt = dst_alloc(&dn_dst_ops)) == NULL) {
- neigh_release(neigh);
- return -EINVAL;
- }
+ hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
+ dn_insert_route(rt, hash, (struct dn_route **)&skb->dst);
+ err = 0;
- rt->key.saddr = cb->src;
- rt->rt_saddr = saddr;
- rt->key.daddr = cb->dst;
- rt->rt_daddr = daddr;
- rt->key.oif = 0;
- rt->key.iif = dev->ifindex;
- rt->key.fwmark = fwmark;
+done:
+ if (neigh)
+ neigh_release(neigh);
+ if (free_res)
+ dn_fib_res_put(&res);
+ dev_put(in_dev);
+ if (out_dev)
+ dev_put(out_dev);
+out:
+ return err;
- rt->u.dst.neighbour = neigh;
- rt->u.dst.dev = neigh ? neigh->dev : NULL;
- if (rt->u.dst.dev)
- dev_hold(rt->u.dst.dev);
- rt->u.dst.lastuse = jiffies;
- rt->u.dst.output = dnrt_output;
- rt->u.dst.input = dnrt_input;
+e_inval:
+ err = -EINVAL;
+ goto done;
- hash = dn_hash(rt->key.saddr, rt->key.daddr);
- dn_insert_route(rt, hash);
- skb->dst = (struct dst_entry *)rt;
+e_nobufs:
+ err = -ENOBUFS;
+ goto done;
- return 0;
+e_neighbour:
+ dst_free(&rt->u.dst);
+ goto done;
}
int dn_route_input(struct sk_buff *skb)
if (skb->dst)
return 0;
- read_lock(&dn_rt_hash_table[hash].lock);
+ rcu_read_lock();
for(rt = dn_rt_hash_table[hash].chain; rt != NULL; rt = rt->u.rt_next) {
- if ((rt->key.saddr == cb->src) &&
- (rt->key.daddr == cb->dst) &&
- (rt->key.oif == 0) &&
+ read_barrier_depends();
+ if ((rt->fl.fld_src == cb->src) &&
+ (rt->fl.fld_dst == cb->dst) &&
+ (rt->fl.oif == 0) &&
#ifdef CONFIG_DECNET_ROUTE_FWMARK
- (rt->key.fwmark == skb->nfmark) &&
+ (rt->fl.fld_fwmark == skb->nfmark) &&
#endif
- (rt->key.iif == cb->iif)) {
+ (rt->fl.iif == cb->iif)) {
rt->u.dst.lastuse = jiffies;
dst_hold(&rt->u.dst);
rt->u.dst.__use++;
- read_unlock(&dn_rt_hash_table[hash].lock);
+ rcu_read_unlock();
skb->dst = (struct dst_entry *)rt;
return 0;
}
}
- read_unlock(&dn_rt_hash_table[hash].lock);
+ rcu_read_unlock();
return dn_route_input_slow(skb);
}
struct rtmsg *r;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
+ struct rta_cacheinfo ci;
nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*r));
r = NLMSG_DATA(nlh);
- nlh->nlmsg_flags = nowait ? NLM_F_MULTI : 0;
+ nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
r->rtm_family = AF_DECnet;
r->rtm_dst_len = 16;
- r->rtm_src_len = 16;
+ r->rtm_src_len = 0;
r->rtm_tos = 0;
- r->rtm_table = 0;
- r->rtm_type = 0;
- r->rtm_flags = 0;
+ r->rtm_table = RT_TABLE_MAIN;
+ r->rtm_type = rt->rt_type;
+ r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
r->rtm_scope = RT_SCOPE_UNIVERSE;
r->rtm_protocol = RTPROT_UNSPEC;
+ if (rt->rt_flags & RTCF_NOTIFY)
+ r->rtm_flags |= RTM_F_NOTIFY;
RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
- RTA_PUT(skb, RTA_SRC, 2, &rt->rt_saddr);
+ if (rt->fl.fld_src) {
+ r->rtm_src_len = 16;
+ RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
+ }
if (rt->u.dst.dev)
RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
- if (dst_metric(&rt->u.dst, RTAX_WINDOW))
- RTA_PUT(skb, RTAX_WINDOW, sizeof(unsigned),
- &rt->u.dst.metrics[RTAX_WINDOW - 1]);
- if (dst_metric(&rt->u.dst, RTAX_RTT))
- RTA_PUT(skb, RTAX_RTT, sizeof(unsigned),
- &rt->u.dst.metrics[RTAX_RTT]);
+ /*
+ * Note to self - change this if input routes reverse direction when
+ * they deal only with inputs and not with replies like they do
+ * currently.
+ */
+ RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_saddr);
+ if (rt->rt_daddr != rt->rt_gateway)
+ RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
+ if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
+ goto rtattr_failure;
+ ci.rta_lastuse = jiffies - rt->u.dst.lastuse;
+ ci.rta_used = rt->u.dst.__use;
+ ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
+ if (rt->u.dst.expires)
+ ci.rta_expires = rt->u.dst.expires - jiffies;
+ else
+ ci.rta_expires = 0;
+ ci.rta_error = rt->u.dst.error;
+ ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
+ RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+ if (rt->fl.iif)
+ RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
nlh->nlmsg_len = skb->tail - b;
return skb->len;
int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
struct rtattr **rta = arg;
+ struct rtmsg *rtm = NLMSG_DATA(nlh);
struct dn_route *rt = NULL;
struct dn_skb_cb *cb;
- dn_address dst = 0;
- dn_address src = 0;
- int iif = 0;
int err;
struct sk_buff *skb;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof(fl));
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
cb = DN_SKB_CB(skb);
if (rta[RTA_SRC-1])
- memcpy(&src, RTA_DATA(rta[RTA_SRC-1]), 2);
+ memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
if (rta[RTA_DST-1])
- memcpy(&dst, RTA_DATA(rta[RTA_DST-1]), 2);
+ memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
if (rta[RTA_IIF-1])
- memcpy(&iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
+ memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
- if (iif) {
+ if (fl.iif) {
struct net_device *dev;
- if ((dev = dev_get_by_index(iif)) == NULL) {
+ if ((dev = dev_get_by_index(fl.iif)) == NULL) {
kfree_skb(skb);
return -ENODEV;
}
}
skb->protocol = __constant_htons(ETH_P_DNA_RT);
skb->dev = dev;
- cb->src = src;
- cb->dst = dst;
+ cb->src = fl.fld_src;
+ cb->dst = fl.fld_dst;
local_bh_disable();
err = dn_route_input(skb);
local_bh_enable();
memset(cb, 0, sizeof(struct dn_skb_cb));
rt = (struct dn_route *)skb->dst;
+ if (!err && rt->u.dst.error)
+ err = rt->u.dst.error;
} else {
- err = dn_route_output((struct dst_entry **)&rt, dst, src, 0);
+ int oif = 0;
+ if (rta[RTA_OIF - 1])
+ memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
+ fl.oif = oif;
+ err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
}
- if (!err && rt->u.dst.error)
- err = rt->u.dst.error;
if (skb->dev)
dev_put(skb->dev);
skb->dev = NULL;
if (err)
goto out_free;
skb->dst = &rt->u.dst;
+ if (rtm->rtm_flags & RTM_F_NOTIFY)
+ rt->rt_flags |= RTCF_NOTIFY;
NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
continue;
if (h > s_h)
s_idx = 0;
- read_lock_bh(&dn_rt_hash_table[h].lock);
+ rcu_read_lock();
for(rt = dn_rt_hash_table[h].chain, idx = 0; rt; rt = rt->u.rt_next, idx++) {
+ read_barrier_depends();
if (idx < s_idx)
continue;
skb->dst = dst_clone(&rt->u.dst);
if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1) <= 0) {
dst_release(xchg(&skb->dst, NULL));
- read_unlock_bh(&dn_rt_hash_table[h].lock);
+ rcu_read_unlock();
goto done;
}
dst_release(xchg(&skb->dst, NULL));
}
- read_unlock_bh(&dn_rt_hash_table[h].lock);
+ rcu_read_unlock();
}
done:
char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
for(i = 0; i <= dn_rt_hash_mask; i++) {
- read_lock_bh(&dn_rt_hash_table[i].lock);
+ rcu_read_lock();
rt = dn_rt_hash_table[i].chain;
for(; rt != NULL; rt = rt->u.rt_next) {
+ read_barrier_depends();
len += sprintf(buffer + len, "%-8s %-7s %-7s %04d %04d %04d\n",
rt->u.dst.dev ? rt->u.dst.dev->name : "*",
dn_addr2asc(dn_ntohs(rt->rt_daddr), buf1),
if (pos > offset + length)
break;
}
- read_unlock_bh(&dn_rt_hash_table[i].lock);
+ rcu_read_unlock();
if (pos > offset + length)
break;
}
dn_rt_hash_mask--;
for(i = 0; i <= dn_rt_hash_mask; i++) {
- dn_rt_hash_table[i].lock = RW_LOCK_UNLOCKED;
+ dn_rt_hash_table[i].lock = SPIN_LOCK_UNLOCKED;
dn_rt_hash_table[i].chain = NULL;
}
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
+#include <linux/in_route.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <net/neighbour.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
dn_address r_srcmask;
dn_address r_dst;
dn_address r_dstmask;
+ dn_address r_srcmap;
u8 r_flags;
#ifdef CONFIG_DECNET_ROUTE_FWMARK
u32 r_fwmark;
static struct dn_fib_rule default_rule = {
.r_clntref = ATOMIC_INIT(2),
.r_preference = 0x7fff,
- .r_table = DN_DEFAULT_TABLE,
+ .r_table = RT_TABLE_MAIN,
.r_action = RTN_UNICAST
};
memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
if (rta[RTA_DST-1])
memcpy(&new_r->r_dst, RTA_DATA(rta[RTA_DST-1]), 2);
+ if (rta[RTA_GATEWAY-1])
+ memcpy(&new_r->r_srcmap, RTA_DATA(rta[RTA_GATEWAY-1]), 2);
new_r->r_src_len = rtm->rtm_src_len;
new_r->r_dst_len = rtm->rtm_dst_len;
new_r->r_srcmask = dnet_make_mask(rtm->rtm_src_len);
}
-int dn_fib_lookup(struct dn_fib_key *key, struct dn_fib_res *res)
+int dn_fib_lookup(const struct flowi *flp, struct dn_fib_res *res)
{
struct dn_fib_rule *r, *policy;
struct dn_fib_table *tb;
- dn_address saddr = key->src;
- dn_address daddr = key->dst;
+ dn_address saddr = flp->fld_src;
+ dn_address daddr = flp->fld_dst;
int err;
read_lock(&dn_fib_rules_lock);
if (((saddr^r->r_src) & r->r_srcmask) ||
((daddr^r->r_dst) & r->r_dstmask) ||
#ifdef CONFIG_DECNET_ROUTE_FWMARK
- (r->r_fwmark && r->r_fwmark != key->fwmark) ||
+ (r->r_fwmark && r->r_fwmark != flp->fld_fwmark) ||
#endif
- (r->r_ifindex && r->r_ifindex != key->iif))
+ (r->r_ifindex && r->r_ifindex != flp->iif))
continue;
switch(r->r_action) {
case RTN_UNICAST:
+ case RTN_NAT:
policy = r;
break;
case RTN_UNREACHABLE:
if ((tb = dn_fib_get_table(r->r_table, 0)) == NULL)
continue;
- err = tb->lookup(tb, key, res);
+ err = tb->lookup(tb, flp, res);
if (err == 0) {
res->r = policy;
if (policy)
return -ESRCH;
}
+unsigned dnet_addr_type(__u16 addr)
+{
+ struct flowi fl = { .nl_u = { .dn_u = { .daddr = addr } } };
+ struct dn_fib_res res;
+ unsigned ret = RTN_UNICAST;
+ struct dn_fib_table *tb = dn_fib_tables[RT_TABLE_LOCAL];
+
+ res.r = NULL;
+
+ if (tb) {
+ if (!tb->lookup(tb, &fl, &res)) {
+ ret = res.type;
+ dn_fib_res_put(&res);
+ }
+ }
+ return ret;
+}
+
+__u16 dn_fib_rules_policy(__u16 saddr, struct dn_fib_res *res, unsigned *flags)
+{
+ struct dn_fib_rule *r = res->r;
+
+ if (r->r_action == RTN_NAT) {
+ int addrtype = dnet_addr_type(r->r_srcmap);
+
+ if (addrtype == RTN_NAT) {
+ saddr = (saddr&~r->r_srcmask)|r->r_srcmap;
+ *flags |= RTCF_SNAT;
+ } else if (addrtype == RTN_LOCAL || r->r_srcmap == 0) {
+ saddr = r->r_srcmap;
+ *flags |= RTCF_MASQ;
+ }
+ }
+ return saddr;
+}
+
static void dn_fib_rules_detach(struct net_device *dev)
{
struct dn_fib_rule *r;
RTA_PUT(skb, RTA_IIF, IFNAMSIZ, &r->r_ifname);
if (r->r_preference)
RTA_PUT(skb, RTA_PRIORITY, 4, &r->r_preference);
+ if (r->r_srcmap)
+ RTA_PUT(skb, RTA_GATEWAY, 2, &r->r_srcmap);
nlh->nlmsg_len = skb->tail - b;
return skb->len;
#include <linux/route.h> /* RTF_xxx */
#include <net/neighbour.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_route.h>
#include <net/dn_fib.h>
u32 dz_hashmask;
#define DZ_HASHMASK(dz) ((dz)->dz_hashmask)
int dz_order;
- u32 dz_mask;
+ u16 dz_mask;
#define DZ_MASK(dz) ((dz)->dz_mask)
};
#define DN_FIB_SCAN_KEY(f, fp, key) \
for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next)
+#define RT_TABLE_MIN 1
static rwlock_t dn_fib_tables_lock = RW_LOCK_UNLOCKED;
-static struct dn_fib_table *dn_fib_tables[DN_NUM_TABLES + 1];
+struct dn_fib_table *dn_fib_tables[RT_TABLE_MAX + 1];
static kmem_cache_t *dn_hash_kmem;
static int dn_fib_hash_zombies;
-static __inline__ dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
+static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
{
- u32 h = ntohs(key.datum)>>(16 - dz->dz_order);
+ u16 h = ntohs(key.datum)>>(16 - dz->dz_order);
h ^= (h >> 10);
h ^= (h >> 6);
- h ^= (h >> 3);
h &= DZ_HASHMASK(dz);
return *(dn_fib_idx_t *)&h;
}
-static __inline__ dn_fib_key_t dz_key(u16 dst, struct dn_zone *dz)
+static inline dn_fib_key_t dz_key(u16 dst, struct dn_zone *dz)
{
dn_fib_key_t k;
k.datum = dst & DZ_MASK(dz);
return k;
}
-static __inline__ struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz)
+static inline struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz)
{
return &dz->dz_hash[dn_hash(key, dz).datum];
}
-static __inline__ struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz)
+static inline struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz)
{
return dz->dz_hash[dn_hash(key, dz).datum];
}
-static __inline__ int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b)
+static inline int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b)
{
return a.datum == b.datum;
}
-static __inline__ int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b)
+static inline int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b)
{
return a.datum <= b.datum;
}
-static __inline__ void dn_rebuild_zone(struct dn_zone *dz,
- struct dn_fib_node **old_ht,
- int old_divisor)
+static inline void dn_rebuild_zone(struct dn_zone *dz,
+ struct dn_fib_node **old_ht,
+ int old_divisor)
{
int i;
struct dn_fib_node *f, **fp, *next;
rtm->rtm_protocol = fi->fib_protocol;
if (fi->fib_priority)
RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
+ if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+ goto rtattr_failure;
if (fi->fib_nhs == 1) {
if (fi->fib_nh->nh_gw)
RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
return -ESRCH;
}
-static __inline__ int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
+static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
{
int found = 0;
struct dn_fib_node *f;
return found;
}
-static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct dn_fib_key *
-key, struct dn_fib_res *res)
+static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp, struct dn_fib_res *res)
{
int err;
struct dn_zone *dz;
read_lock(&dn_fib_tables_lock);
for(dz = t->dh_zone_list; dz; dz = dz->dz_next) {
struct dn_fib_node *f;
- dn_fib_key_t k = dz_key(key->dst, dz);
+ dn_fib_key_t k = dz_key(flp->fld_dst, dz);
for(f = dz_chain(k, dz); f; f = f->fn_next) {
- if (!dn_key_leq(k, f->fn_key))
- break;
- else
- continue;
+ if (!dn_key_eq(k, f->fn_key)) {
+ if (dn_key_leq(k, f->fn_key))
+ break;
+ else
+ continue;
+ }
f->fn_state |= DN_S_ACCESSED;
if (f->fn_state&DN_S_ZOMBIE)
continue;
- if (f->fn_scope < key->scope)
+
+ if (f->fn_scope < flp->fld_scope)
continue;
- err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), key, res);
+ err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), flp, res);
+
if (err == 0) {
res->type = f->fn_type;
- res->scope = f->fn_scope;
+ res->scope = f->fn_scope;
res->prefixlen = dz->dz_order;
goto out;
}
if (fi) {
len = sprintf(buffer, "%s\t%04x\t%04x\t%04x\t%d\t%u\t%d\t%04x\t%d\t%u\t%u",
- fi->fib_dev ? fi->fib_dev->name : "*", prefix,
+ fi->dn_fib_dev ? fi->dn_fib_dev->name : "*", prefix,
fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
mask, 0, 0, 0);
} else {
{
struct dn_fib_table *t;
- if (n < DN_MIN_TABLE)
+ if (n < RT_TABLE_MIN)
return NULL;
- if (n > DN_NUM_TABLES)
+ if (n > RT_TABLE_MAX)
return NULL;
if (dn_fib_tables[n])
printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
return NULL;
}
- if ((t = kmalloc(sizeof(struct dn_fib_table), GFP_KERNEL)) == NULL)
+ if ((t = kmalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash), GFP_KERNEL)) == NULL)
return NULL;
memset(t, 0, sizeof(struct dn_fib_table));
t->get_info = dn_fib_table_get_info;
#endif
t->dump = dn_fib_table_dump;
+ memset(t->data, 0, sizeof(struct dn_hash));
dn_fib_tables[n] = t;
return t;
{
int id;
- for(id = DN_MIN_TABLE; id <= DN_NUM_TABLES; id++)
+ for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
if (dn_fib_tables[id] == NULL)
return dn_fib_get_table(id, 1);
return NULL;
{
int i;
- for (i = 0; i < DN_NUM_TABLES + 1; ++i)
+ for (i = RT_TABLE_MIN; i <= RT_TABLE_MAX; ++i)
dn_fib_del_tree(i);
return;
#include <linux/spinlock.h>
#include <net/sock.h>
#include <asm/atomic.h>
+#include <net/flow.h>
#include <net/dn.h>
/*
#include <linux/string.h>
#include <net/neighbour.h>
#include <net/dst.h>
+#include <net/flow.h>
#include <asm/uaccess.h>