* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes:
+ *
+ * Harald Welte: <laforge@gnumonks.org>
+ * - Add neighbour cache statistics like rtstat
*/
/* The following flags & states are exported to user space,
struct neigh_statistics
{
- unsigned long allocs;
- unsigned long res_failed;
- unsigned long rcv_probes_mcast;
- unsigned long rcv_probes_ucast;
+ unsigned long allocs; /* number of allocated neighs */
+ unsigned long destroys; /* number of destroyed neighs */
+ unsigned long hash_grows; /* number of hash resizes */
+
+ unsigned long res_failed; /* number of failed resolutions */
+
+ unsigned long lookups; /* number of lookups */
+ unsigned long hits; /* number of hits (among lookups) */
+
+ unsigned long rcv_probes_mcast; /* number of received mcast probes (IPv6) */
+ unsigned long rcv_probes_ucast; /* number of received ucast probes (IPv6) */
+
+ unsigned long periodic_gc_runs; /* number of periodic GC runs */
+ unsigned long forced_gc_runs; /* number of forced GC runs */
};
+#define NEIGH_CACHE_STAT_INC(tbl, field) \
+ (per_cpu_ptr((tbl)->stats, smp_processor_id())->field++)
+
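For illustration (not part of the patch): NEIGH_CACHE_STAT_INC resolves the calling CPU's private statistics block via per_cpu_ptr() and bumps one field, so writers on different CPUs never contend on a shared counter. The flip side is that a reader wanting a single total must fold the per-CPU values together. A minimal sketch of such a fold, reusing the per_cpu_ptr()/cpu_possible() helpers the patch itself uses below; the helper name is hypothetical:

	/* hypothetical helper: sum the per-CPU 'allocs' counters into one total */
	static inline unsigned long neigh_stat_allocs_total(struct neigh_table *tbl)
	{
		unsigned long total = 0;
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!cpu_possible(cpu))
				continue;
			total += per_cpu_ptr(tbl->stats, cpu)->allocs;
		}
		return total;
	}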
struct neigh_table
{
struct neigh_table *next;
unsigned long last_rand;
struct neigh_parms *parms_list;
kmem_cache_t *kmem_cachep;
- struct neigh_statistics stats;
+ struct neigh_statistics *stats;
struct neighbour **hash_buckets;
unsigned int hash_mask;
__u32 hash_rnd;
unsigned int hash_chain_gc;
struct pneigh_entry **phash_buckets;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *pde;
+#endif
};
/* flags for neigh_update() */
*
* Fixes:
* Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
+ * Harald Welte Add neighbour cache statistics like rtstat
*/
#include <linux/config.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
static int neigh_glbl_allocs;
static struct neigh_table *neigh_tables;
+static struct file_operations neigh_stat_seq_fops;
/*
Neighbour hash table buckets are protected with rwlock tbl->lock.
int shrunk = 0;
int i;
+ NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
+
write_lock_bh(&tbl->lock);
for (i = 0; i <= tbl->hash_mask; i++) {
struct neighbour *n, **np;
init_timer(&n->timer);
n->timer.function = neigh_timer_handler;
n->timer.data = (unsigned long)n;
- tbl->stats.allocs++;
+
+ NEIGH_CACHE_STAT_INC(tbl, allocs);
neigh_glbl_allocs++;
tbl->entries++;
n->tbl = tbl;
struct neighbour **new_hash, **old_hash;
unsigned int i, new_hash_mask, old_entries;
+ NEIGH_CACHE_STAT_INC(tbl, hash_grows);
+
BUG_ON(new_entries & (new_entries - 1));
new_hash = neigh_hash_alloc(new_entries);
if (!new_hash)
struct neighbour *n;
int key_len = tbl->key_len;
u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
+
+ NEIGH_CACHE_STAT_INC(tbl, lookups);
read_lock_bh(&tbl->lock);
for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
neigh_hold(n);
+ NEIGH_CACHE_STAT_INC(tbl, hits);
break;
}
}
int key_len = tbl->key_len;
u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
+ NEIGH_CACHE_STAT_INC(tbl, lookups);
+
read_lock_bh(&tbl->lock);
for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
if (!memcmp(n->primary_key, pkey, key_len)) {
neigh_hold(n);
+ NEIGH_CACHE_STAT_INC(tbl, hits);
break;
}
}
{
struct hh_cache *hh;
+ NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
+
if (!neigh->dead) {
printk(KERN_WARNING
"Destroying alive neighbour %p\n", neigh);
struct neighbour *n, **np;
unsigned long expire, now = jiffies;
+ NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
+
write_lock(&tbl->lock);
/*
neigh->nud_state = NUD_FAILED;
notify = 1;
- neigh->tbl->stats.res_failed++;
+ NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
/* It is very thin place. report_unreachable is very complicated
if (!tbl->kmem_cachep)
panic("cannot create neighbour cache");
+ tbl->stats = alloc_percpu(struct neigh_statistics);
+ if (!tbl->stats)
+ panic("cannot create neighbour cache statistics");
+
+#ifdef CONFIG_PROC_FS
+#define NC_STAT_SUFFIX "_stat"
+ {
+ char *proc_stat_name;
+ proc_stat_name = kmalloc(strlen(tbl->id) +
+ strlen(NC_STAT_SUFFIX) + 1, GFP_KERNEL);
+ if (!proc_stat_name)
+ panic("cannot allocate neighbour cache proc name buffer");
+ strcpy(proc_stat_name, tbl->id);
+ strcat(proc_stat_name, NC_STAT_SUFFIX);
+
+ tbl->pde = create_proc_entry(proc_stat_name, 0, proc_net);
+ if (!tbl->pde)
+ panic("cannot create neighbour proc dir entry");
+ tbl->pde->proc_fops = &neigh_stat_seq_fops;
+ tbl->pde->data = tbl;
+ kfree(proc_stat_name); /* create_proc_entry() copies the name, so drop the buffer */
+ }
+#endif
+
tbl->hash_mask = 1;
tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
}
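Note that neigh_table_init() only allocates the per-CPU block; the matching release is not part of this hunk. A sketch of the teardown, assuming it belongs in neigh_table_clear() and uses the same alloc_percpu()/free_percpu() pairing:

	/* sketch: release the per-CPU statistics when the table goes away */
	free_percpu(tbl->stats);
	tbl->stats = NULL;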
EXPORT_SYMBOL(neigh_seq_stop);
+/* statistics via seq_file */
+
+static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct proc_dir_entry *pde = seq->private;
+ struct neigh_table *tbl = pde->data;
+ int cpu;
+
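+ /* *pos == 0 stands for the header row; CPU n is presented at *pos == n + 1 */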
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu+1;
+ return per_cpu_ptr(tbl->stats, cpu);
+ }
+ return NULL;
+}
+
+static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct proc_dir_entry *pde = seq->private;
+ struct neigh_table *tbl = pde->data;
+ int cpu;
+
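+ /* *pos already holds the next candidate CPU; skip holes in the possible-CPU map */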
+ for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu+1;
+ return per_cpu_ptr(tbl->stats, cpu);
+ }
+ return NULL;
+}
+
+static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
+{
+ /* nothing to release: the per-CPU walk takes no locks or references */
+}
+
+static int neigh_stat_seq_show(struct seq_file *seq, void *v)
+{
+ struct proc_dir_entry *pde = seq->private;
+ struct neigh_table *tbl = pde->data;
+ struct neigh_statistics *st = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs forced_gc_goal_miss\n");
+ return 0;
+ }
+
+ seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
+ "%08lx %08lx %08lx %08lx\n",
+ tbl->entries,
+
+ st->allocs,
+ st->destroys,
+ st->hash_grows,
+
+ st->lookups,
+ st->hits,
+
+ st->res_failed,
+
+ st->rcv_probes_mcast,
+ st->rcv_probes_ucast,
+
+ st->periodic_gc_runs,
+ st->forced_gc_runs
+ );
+
+ return 0;
+}
+
+static struct seq_operations neigh_stat_seq_ops = {
+ .start = neigh_stat_seq_start,
+ .next = neigh_stat_seq_next,
+ .stop = neigh_stat_seq_stop,
+ .show = neigh_stat_seq_show,
+};
+
+static int neigh_stat_seq_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &neigh_stat_seq_ops);
+
+ if (!ret) {
+ struct seq_file *sf = file->private_data;
+ sf->private = PDE(inode);
+ }
+ return ret;
+}
+
+static struct file_operations neigh_stat_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = neigh_stat_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
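Since every counter is printed as zero-padded hex ("%08lx", and "%08x" for the entry count), user space only needs base-16 conversion. A quick way to exercise the file; a minimal user-space sketch, assuming a table id of "arp_cache" so the entry lands at /proc/net/arp_cache_stat:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/net/arp_cache_stat", "r"); /* hypothetical path */

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout); /* header, then one line per possible CPU */
		fclose(f);
		return 0;
	}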
+
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_ARPD