+int rttrash = 0; /* routes not in table but not freed */
+
+static unsigned int rte_debug;
+
+/* Possible flags for rte_debug */
+#define RTD_DEBUG 0x1 /* enable the rtentry debug facility */
+#define RTD_TRACE 0x2 /* trace alloc, free and refcnt */
+#define RTD_NO_FREE 0x4 /* don't free entries (helps catch post-free corruption) */
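+
+/*
+ * Illustrative sketch (assumption, not part of this change): rte_debug
+ * would typically be seeded once at init time from a boot argument so the
+ * facility can be turned on without a rebuild.  The boot-arg name and the
+ * PE_parse_boot_arg() hook (pexpert/pexpert.h) are assumptions.
+ */
+static void
+rte_debug_init(void)
+{
+	PE_parse_boot_arg("rte_debug", &rte_debug);
+	if (rte_debug != 0)
+		rte_debug |= RTD_DEBUG;	/* any trace/no-free bit implies debug */
+}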
+
+static struct zone *rte_zone; /* special zone for rtentry */
+#define RTE_ZONE_MAX 65536 /* maximum elements in zone */
+#define RTE_ZONE_NAME "rtentry" /* name of rtentry zone */
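+
+/*
+ * Sketch (assumption, not part of this change): how rte_zone might be
+ * created.  zinit()/zalloc()/zfree() are the standard xnu zone allocator
+ * calls; the helper name and its elem_size parameter are hypothetical.
+ * The element size is passed in because, with RTD_DEBUG set, each element
+ * must be large enough for the debug wrapper defined further below.
+ */
+static void
+rte_zone_init(vm_size_t elem_size)
+{
+	rte_zone = zinit(elem_size, RTE_ZONE_MAX * elem_size, 0, RTE_ZONE_NAME);
+	if (rte_zone == NULL)
+		panic("%s: failed to create %s zone", __func__, RTE_ZONE_NAME);
+}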
+
+#define RTD_INUSE 0xFEEDFACE /* entry is in use */
+#define RTD_FREED 0xDEADBEEF /* entry is freed */
+
+#define RTD_TRSTACK_SIZE 8 /* depth of stack trace */
+#define RTD_REFHIST_SIZE 4 /* refcnt history size */
+
+/*
+ * Debug variant of rtentry structure.
+ */
+struct rtentry_dbg {
+ struct rtentry rtd_entry; /* embedded rtentry; must come first */
+ struct rtentry rtd_entry_saved; /* saved rtentry */
+ u_int32_t rtd_inuse; /* in use pattern */
+ u_int16_t rtd_refhold_cnt; /* # of rtref */
+ u_int16_t rtd_refrele_cnt; /* # of rtunref */
+ /*
+ * Thread and PC stack trace up to RTD_TRSTACK_SIZE
+ * deep during alloc and free.
+ */
+ struct thread *rtd_alloc_thread;
+ void *rtd_alloc_stk_pc[RTD_TRSTACK_SIZE];
+ struct thread *rtd_free_thread;
+ void *rtd_free_stk_pc[RTD_TRSTACK_SIZE];
+ /*
+ * Circular buffers recording the most recent rtref and rtunref callers.
+ */
+ u_int16_t rtd_refhold_next;
+ u_int16_t rtd_refrele_next;
+ struct {
+ struct thread *th;
+ void *pc[RTD_TRSTACK_SIZE];
+ } rtd_refhold[RTD_REFHIST_SIZE];
+ struct {
+ struct thread *th;
+ void *pc[RTD_TRSTACK_SIZE];
+ } rtd_refrele[RTD_REFHIST_SIZE];
+ /*
+ * Trash list linkage
+ */
+ TAILQ_ENTRY(rtentry_dbg) rtd_trash_link;
+};
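+
+/*
+ * Illustrative sketch (assumption, not taken from this change): how the
+ * fields above could be used on the free path.  rtd_inuse is flipped from
+ * RTD_INUSE to RTD_FREED so a double free or a stray write is detectable,
+ * the entry is snapshotted into rtd_entry_saved, and with RTD_NO_FREE set
+ * the entry is parked on a trash list (counted by rttrash) instead of
+ * being returned to the zone.  The trash-list head and the helper names
+ * are assumptions; capture of the pc[] stack arrays is omitted.
+ */
+static TAILQ_HEAD(, rtentry_dbg) rttrash_head =
+    TAILQ_HEAD_INITIALIZER(rttrash_head);
+
+static void
+rte_free_debug(struct rtentry *rt)
+{
+	struct rtentry_dbg *rtd = (struct rtentry_dbg *)rt;
+
+	if (rtd->rtd_inuse == RTD_FREED)
+		panic("rte_free_debug: double free of rte=%p", rt);
+	else if (rtd->rtd_inuse != RTD_INUSE)
+		panic("rte_free_debug: corrupted rte=%p", rt);
+
+	/* snapshot the entry as it looked at free time */
+	bcopy((caddr_t)rt, (caddr_t)&rtd->rtd_entry_saved,
+	    sizeof (struct rtentry));
+
+	rtd->rtd_inuse = RTD_FREED;
+	rtd->rtd_free_thread = current_thread();
+	/* rtd_free_stk_pc[] would be filled by a backtrace helper here */
+
+	if (rte_debug & RTD_NO_FREE) {
+		/* keep the entry around so late references are catchable */
+		TAILQ_INSERT_TAIL(&rttrash_head, rtd, rtd_trash_link);
+		rttrash++;
+	} else {
+		zfree(rte_zone, rt);
+	}
+}
+
+/*
+ * Sketch (assumption): recording an rtref caller into the circular
+ * rtd_refhold history; rtd_refrele would be handled symmetrically.  The
+ * caller is assumed to hold whatever lock protects the rtentry.
+ */
+static void
+rte_refhold_trace(struct rtentry_dbg *rtd)
+{
+	u_int16_t idx;
+
+	rtd->rtd_refhold_cnt++;
+	idx = rtd->rtd_refhold_next++ % RTD_REFHIST_SIZE;
+	rtd->rtd_refhold[idx].th = current_thread();
+	/* rtd_refhold[idx].pc[] would be filled by a backtrace helper */
+}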