long tw_ts_recent_stamp;
unsigned long tw_ttd;
struct tcp_bind_bucket *tw_tb;
- struct tcp_tw_bucket *tw_next_death;
- struct tcp_tw_bucket **tw_pprev_death;
-
+ struct hlist_node tw_death_node;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr tw_v6_daddr;
struct in6_addr tw_v6_rcv_saddr;
hlist_add_head(&tw->tw_bind_node, list);
}
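+/*
+ * A timewait bucket sits on a death row iff tw_death_node is hashed;
+ * hlist marks an unhashed node with pprev == NULL, so no separate
+ * "scheduled" flag is needed.
+ */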
+static __inline__ int tw_dead_hashed(struct tcp_tw_bucket *tw)
+{
+ return tw->tw_death_node.pprev != NULL;
+}
+
+static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw)
+{
+ tw->tw_death_node.pprev = NULL;
+}
+
+static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw)
+{
+ __hlist_del(&tw->tw_death_node);
+ tw_dead_node_init(tw);
+}
+
+static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
+{
+ if (tw_dead_hashed(tw)) {
+ __tw_del_dead_node(tw);
+ return 1;
+ }
+ return 0;
+}
+
#define tw_for_each(tw, node, head) \
hlist_for_each_entry(tw, node, head, tw_node)
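+/*
+ * Walk the inmates of a death row slot.  The _safe variant is used
+ * because callers unlink the current entry inside the loop body.
+ */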
+#define tw_for_each_inmate(tw, node, safe, jail) \
+ hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
+
#define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
extern kmem_cache_t *tcp_timewait_cachep;
tw->tw_rcv_wnd = tcp_receive_window(tp);
tw->tw_ts_recent = tp->ts_recent;
tw->tw_ts_recent_stamp = tp->ts_recent_stamp;
- tw->tw_pprev_death = NULL;
+ tw_dead_node_init(tw);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (tw->tw_family == PF_INET6) {
}
/* Kill off TIME_WAIT sockets once their lifetime has expired. */
-static int tcp_tw_death_row_slot = 0;
+static int tcp_tw_death_row_slot;
static void tcp_twkill(unsigned long);
#define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
-static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS];
+static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
static void tcp_twkill(unsigned long dummy)
{
struct tcp_tw_bucket *tw;
+ struct hlist_node *node, *safe;
int killed = 0;
/* NOTE: compare this to previous version where lock
if (tcp_tw_count == 0)
goto out;
- while((tw = tcp_tw_death_row[tcp_tw_death_row_slot]) != NULL) {
- tcp_tw_death_row[tcp_tw_death_row_slot] = tw->tw_next_death;
- if (tw->tw_next_death)
- tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death;
- tw->tw_pprev_death = NULL;
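+	/* Each inmate is unlinked while holding tw_death_lock; the lock
+	 * is dropped around the actual tcp_timewait_kill().
+	 */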
+ tw_for_each_inmate(tw, node, safe,
+ &tcp_tw_death_row[tcp_tw_death_row_slot]) {
+ __tw_del_dead_node(tw);
spin_unlock(&tw_death_lock);
-
tcp_timewait_kill(tw);
tcp_tw_put(tw);
-
killed++;
-
spin_lock(&tw_death_lock);
}
tcp_tw_death_row_slot =
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
spin_lock(&tw_death_lock);
- if (tw->tw_pprev_death) {
- if (tw->tw_next_death)
- tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death;
- *tw->tw_pprev_death = tw->tw_next_death;
- tw->tw_pprev_death = NULL;
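+	/* A scheduled bucket holds a death row reference; drop it here. */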
+ if (tw_del_dead_node(tw)) {
tcp_tw_put(tw);
if (--tcp_tw_count == 0)
del_timer(&tcp_tw_timer);
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer =
TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
-static struct tcp_tw_bucket *tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
+static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
{
- struct tcp_tw_bucket **tpp;
+ struct hlist_head *list;
int slot;
/* timeout := RTO * 3.5
spin_lock(&tw_death_lock);
/* Unlink it, if it was scheduled */
- if (tw->tw_pprev_death) {
- if (tw->tw_next_death)
- tw->tw_next_death->tw_pprev_death = tw->tw_pprev_death;
- *tw->tw_pprev_death = tw->tw_next_death;
- tw->tw_pprev_death = NULL;
+ if (tw_del_dead_node(tw))
tcp_tw_count--;
- } else
+ else
atomic_inc(&tw->tw_refcnt);
if (slot >= TCP_TW_RECYCLE_SLOTS) {
}
tw->tw_ttd = jiffies + timeo;
slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
- tpp = &tcp_tw_death_row[slot];
+ list = &tcp_tw_death_row[slot];
} else {
tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
}
- tpp = &tcp_twcal_row[slot];
+ list = &tcp_twcal_row[slot];
}
- if ((tw->tw_next_death = *tpp) != NULL)
- (*tpp)->tw_pprev_death = &tw->tw_next_death;
- *tpp = tw;
- tw->tw_pprev_death = tpp;
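+	/* Link onto the head of the chosen death row slot. */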
+ hlist_add_head(&tw->tw_death_node, list);
if (tcp_tw_count++ == 0)
mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
if (time_before_eq(j, now)) {
+ struct hlist_node *node, *safe;
struct tcp_tw_bucket *tw;
- while((tw = tcp_twcal_row[slot]) != NULL) {
- tcp_twcal_row[slot] = tw->tw_next_death;
- tw->tw_pprev_death = NULL;
-
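+			/* An expired recycle slot is emptied in one pass. */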
+ tw_for_each_inmate(tw, node, safe,
+ &tcp_twcal_row[slot]) {
+ __tw_del_dead_node(tw);
tcp_timewait_kill(tw);
tcp_tw_put(tw);
killed++;
tcp_twcal_hand = slot;
}
- if (tcp_twcal_row[slot] != NULL) {
+ if (!hlist_empty(&tcp_twcal_row[slot])) {
mod_timer(&tcp_twcal_timer, j);
goto out;
}