ipmr_base.c: 845c9a7ae7f5342ba42280c3a2f2aa92bce641d7 -> c8d6196803265484f7e1cdd1b00a188dc59a5988
/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
 */

#include <linux/mroute_base.h>

/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,

--- 89 unchanged lines hidden ---

		proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

	return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);
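/* Illustrative sketch, not part of ipmr_base.c: roughly how a protocol's own
 * code (e.g. the IPv4 side) could drive mr_mfc_find_any() above. Each
 * protocol builds its own rhashtable comparison argument and hands it through
 * the opaque 'hasharg' pointer; the struct layout and helper name below are
 * assumptions made for illustration, and such a helper would live in the
 * protocol-specific file, not here.
 */
struct example_mfc_cmp_arg {
	__be32 mfc_mcastgrp;
	__be32 mfc_origin;
};

static struct mr_mfc *example_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	struct example_mfc_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = htonl(INADDR_ANY),
	};

	/* mr_mfc_find_any() looks 'arg' up in the table's hash and returns a
	 * matching (*,G) entry, falling back to a (*,*) parent entry.
	 */
	return mr_mfc_find_any(mrt, vifi, &arg);
}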

#ifdef CONFIG_PROC_FS
void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(it->lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(it->lock);

	it->cache = NULL;
	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);

void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *c = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return mr_mfc_seq_idx(net, seq->private, 0);

	if (c->list.next != it->cache)
		return list_entry(c->list.next, struct mr_mfc, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(it->lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
	spin_unlock_bh(it->lock);
	it->cache = NULL;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif
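
For context, the two exported iterators above deliberately leave a lock held between calls: they return with the RCU read lock held while walking the resolved list, or with it->lock held while walking the unresolved queue, and record which via it->cache. The per-protocol /proc code supplies the surrounding seq_operations and must drop that lock in its stop callback. Below is a minimal sketch of such a caller, assuming seq->private is a struct mr_mfc_iter whose ->mrt and ->lock were set when the file was opened; every example_ identifier is hypothetical rather than part of the kernel API.

#include <linux/mroute_base.h>
#include <linux/seq_file.h>
#include <linux/seq_file_net.h>

static void *example_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;

	/* it->mrt and it->lock are assumed to have been initialized at open
	 * time; restart the walk from the requested offset.
	 */
	it->cache = NULL;
	return *pos ? mr_mfc_seq_idx(seq_file_net(seq), it, *pos - 1)
		    : SEQ_START_TOKEN;
}

static void example_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct mr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	/* Drop whichever lock mr_mfc_seq_idx()/mr_mfc_seq_next() left held */
	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(it->lock);
	else if (it->cache == &mrt->mfc_cache_list)
		rcu_read_unlock();
}

static int example_mfc_seq_show(struct seq_file *seq, void *v)
{
	/* Protocol-specific formatting of one struct mr_mfc entry goes here */
	return 0;
}

static const struct seq_operations example_mfc_seq_ops = {
	.start = example_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = example_mfc_seq_stop,
	.show  = example_mfc_seq_show,
};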