Lines matching the identifier qp (search query full:qp) in the Linux kernel's net/ipv4/ip_fragment.c

78 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
85 struct ipq *qp = container_of(q, struct ipq, q);
91 qp->ecn = 0;
99 qp->peer = p;
104 struct ipq *qp;
106 qp = container_of(q, struct ipq, q);
107 if (qp->peer)
108 inet_putpeer(qp->peer);
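
Lines 85-108 (the queue init/free callbacks) rely on the kernel's container_of() idiom: the generic struct inet_frag_queue is embedded as member q inside struct ipq, and the callbacks recover the enclosing ipq from a pointer to the embedded member. A minimal userspace sketch of the same idiom (struct names here are stand-ins, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner { int flags; };               /* plays inet_frag_queue */
    struct outer { int ecn; struct inner q; }; /* plays struct ipq      */

    static void init_cb(struct inner *q)
    {
            /* Recover the enclosing object from the embedded member,
             * as the init callback does with container_of(q, struct ipq, q). */
            struct outer *o = container_of(q, struct outer, q);
            o->ecn = 0;
    }

    int main(void)
    {
            struct outer o = { .ecn = 42 };
            init_cb(&o.q);
            printf("ecn after init: %d\n", o.ecn);  /* prints 0 */
            return 0;
    }
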
130 struct ipq *qp;
133 qp = container_of(frag, struct ipq, q);
134 net = qp->q.fqdir->net;
139 if (READ_ONCE(qp->q.fqdir->dead))
142 spin_lock(&qp->q.lock);
144 if (qp->q.flags & INET_FRAG_COMPLETE)
147 qp->q.flags |= INET_FRAG_DROP;
148 inet_frag_kill(&qp->q, &refs);
152 if (!(qp->q.flags & INET_FRAG_FIRST_IN))
159 head = inet_frag_pull_head(&qp->q);
162 head->dev = dev_get_by_index_rcu(net, qp->iif);
178 if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
182 spin_unlock(&qp->q.lock);
187 spin_unlock(&qp->q.lock);
191 inet_frag_putn(&qp->q, refs);
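
Lines 130-191 are the expiry timer handler: take the per-queue spinlock, bail out if reassembly already finished, otherwise mark the queue dropped and kill it, and only reach the ICMP time-exceeded path when the first fragment is present, since without it there is no offending header to quote back to the sender. A hedged userspace sketch of that lock/check/kill ordering, with a pthread mutex standing in for the spinlock and simplified flag names mirroring the kernel's:

    #include <pthread.h>
    #include <stdio.h>

    #define FRAG_COMPLETE 0x1   /* mirrors INET_FRAG_COMPLETE */
    #define FRAG_FIRST_IN 0x2   /* mirrors INET_FRAG_FIRST_IN */
    #define FRAG_DROP     0x4   /* mirrors INET_FRAG_DROP     */

    struct frag_queue {
            pthread_mutex_t lock;
            unsigned int flags;
    };

    static void expire(struct frag_queue *q)
    {
            pthread_mutex_lock(&q->lock);

            /* Reassembly finished before the timer fired: nothing to do. */
            if (q->flags & FRAG_COMPLETE)
                    goto out;

            /* Mark the queue dead so concurrent inserts give up on it. */
            q->flags |= FRAG_DROP | FRAG_COMPLETE;

            /* Only if the first fragment arrived is there a header to
             * quote in an ICMP time-exceeded error. */
            if (q->flags & FRAG_FIRST_IN)
                    printf("would send ICMP time-exceeded here\n");
    out:
            pthread_mutex_unlock(&q->lock);
    }

    int main(void)
    {
            struct frag_queue q = { PTHREAD_MUTEX_INITIALIZER, FRAG_FIRST_IN };
            expire(&q);
            return 0;
    }
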
218 static int ip_frag_too_far(struct ipq *qp)
220 struct inet_peer *peer = qp->peer;
221 unsigned int max = qp->q.fqdir->max_dist;
229 start = qp->rid;
231 qp->rid = end;
233 rc = qp->q.fragments_tail && (end - start) > max;
236 __IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS);
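
ip_frag_too_far() (lines 218-236) throttles per-peer fragment floods: each peer carries a monotonically increasing reassembly id, and a queue is considered stale when the peer's id has advanced more than fqdir->max_dist since this queue last recorded it in qp->rid. Because the counter wraps, the distance is computed with plain unsigned subtraction. A standalone illustration of the wrap-safe arithmetic (function and variable names are mine):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe "how far apart are two wrapping counters" check:
     * unsigned subtraction yields the forward distance even across
     * the 2^32 boundary, which is why (end - start) > max needs no
     * special-casing for overflow. */
    static bool too_far(uint32_t start, uint32_t end, uint32_t max)
    {
            return (uint32_t)(end - start) > max;
    }

    int main(void)
    {
            /* Plain case: ids 100 -> 50000 with max_dist 64. */
            printf("%d\n", too_far(100, 50000, 64));         /* 1: too far */

            /* Wrapped case: start just below UINT32_MAX, end just past 0;
             * the true distance is 32, so this is still in range. */
            printf("%d\n", too_far(0xFFFFFFF0u, 0x10u, 64)); /* 0 */
            return 0;
    }
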
241 static int ip_frag_reinit(struct ipq *qp)
245 if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) {
246 refcount_inc(&qp->q.refcnt);
250 sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments,
252 sub_frag_mem_limit(qp->q.fqdir, sum_truesize);
254 qp->q.flags = 0;
255 qp->q.len = 0;
256 qp->q.meat = 0;
257 qp->q.rb_fragments = RB_ROOT;
258 qp->q.fragments_tail = NULL;
259 qp->q.last_run_head = NULL;
260 qp->iif = 0;
261 qp->ecn = 0;
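
When a queue has fallen too far behind, ip_frag_reinit() (lines 241-261) recycles it instead of tearing it down: re-arm the timer, purge all queued fragments, return their truesize to the memory accounting, and zero every field back to the freshly-created state. A toy version of that purge-and-reset shape (a linked list stands in for the kernel's rbtree of skbs; names are mine):

    #include <stdio.h>
    #include <stdlib.h>

    struct frag { struct frag *next; size_t truesize; };

    struct queue {
            struct frag  *head;
            unsigned int  flags;
            size_t        len, meat, mem_accounted;
    };

    /* Free all fragments and report how much accounted memory they
     * held, like inet_frag_rbtree_purge() returning sum_truesize. */
    static size_t purge(struct queue *q)
    {
            size_t sum = 0;
            while (q->head) {
                    struct frag *f = q->head;
                    q->head = f->next;
                    sum += f->truesize;
                    free(f);
            }
            return sum;
    }

    /* Rough analogue of ip_frag_reinit(): purge, give the memory back
     * to the accounting, and reset the queue to its just-created state. */
    static void reinit(struct queue *q)
    {
            q->mem_accounted -= purge(q);   /* sub_frag_mem_limit() analogue */
            q->flags = 0;
            q->len = q->meat = 0;
    }

    int main(void)
    {
            struct queue q = { 0 };
            struct frag *f = malloc(sizeof(*f));
            f->next = NULL;
            f->truesize = 256;
            q.head = f;
            q.mem_accounted = 256;
            q.meat = 100;
            reinit(&q);
            printf("mem=%zu meat=%zu\n", q.mem_accounted, q.meat); /* 0 0 */
            return 0;
    }
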
267 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb, int *refs)
269 struct net *net = qp->q.fqdir->net;
279 if (qp->q.flags & INET_FRAG_COMPLETE) {
285 unlikely(ip_frag_too_far(qp)) &&
286 unlikely(err = ip_frag_reinit(qp))) {
287 inet_frag_kill(&qp->q, refs);
307 if (end < qp->q.len ||
308 ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
310 qp->q.flags |= INET_FRAG_LAST_IN;
311 qp->q.len = end;
318 if (end > qp->q.len) {
320 if (qp->q.flags & INET_FRAG_LAST_IN)
322 qp->q.len = end;
341 prev_tail = qp->q.fragments_tail;
342 err = inet_frag_queue_insert(&qp->q, skb, offset, end);
347 qp->iif = dev->ifindex;
349 qp->q.stamp = skb->tstamp;
350 qp->q.tstamp_type = skb->tstamp_type;
351 qp->q.meat += skb->len;
352 qp->ecn |= ecn;
353 add_frag_mem_limit(qp->q.fqdir, skb->truesize);
355 qp->q.flags |= INET_FRAG_FIRST_IN;
359 if (fragsize > qp->q.max_size)
360 qp->q.max_size = fragsize;
363 fragsize > qp->max_df_size)
364 qp->max_df_size = fragsize;
366 if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
367 qp->q.meat == qp->q.len) {
371 err = ip_frag_reasm(qp, skb, prev_tail, dev, refs);
374 inet_frag_kill(&qp->q, refs);
391 inet_frag_kill(&qp->q, refs);
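
Lines 267-391 are the core of ip_frag_queue(). Each fragment covers [offset, end); a fragment may never shrink the known datagram length, and once a last fragment (MF=0) has pinned the total length, no fragment may disagree with or extend past it. Reassembly fires only when both the first and last fragments are in and the byte count collected so far (q.meat) exactly equals the expected length (q.len). A hedged re-statement of those consistency checks as a standalone function (overlap handling, done by inet_frag_queue_insert() in the kernel, is omitted here):

    #include <stdbool.h>
    #include <stdio.h>

    #define FRAG_LAST_IN 0x1   /* stands in for INET_FRAG_LAST_IN */

    struct queue { unsigned int flags; unsigned int len; unsigned int meat; };

    /* Validate a fragment covering [offset, end) with more-fragments
     * flag mf, mirroring the checks in ip_frag_queue(). Returns false
     * for an inconsistent (corrupted) fragment. */
    static bool accept_fragment(struct queue *q, unsigned int offset,
                                unsigned int end, bool mf)
    {
            if (!mf) {
                    /* Final fragment: it pins the total length. It may
                     * not fall short of data we already hold, and a
                     * second "last" fragment must agree on the length. */
                    if (end < q->len ||
                        ((q->flags & FRAG_LAST_IN) && end != q->len))
                            return false;
                    q->flags |= FRAG_LAST_IN;
                    q->len = end;
            } else if (end > q->len) {
                    /* Non-final fragment: may extend the known length,
                     * but never past a length fixed by a last fragment. */
                    if (q->flags & FRAG_LAST_IN)
                            return false;
                    q->len = end;
            }
            q->meat += end - offset;  /* assumes no overlap in this sketch */
            return true;
    }

    int main(void)
    {
            struct queue q = { 0, 0, 0 };
            printf("%d\n", accept_fragment(&q, 0, 1480, true));     /* 1 */
            printf("%d\n", accept_fragment(&q, 1480, 2000, false)); /* 1 */
            printf("%d\n", accept_fragment(&q, 2000, 3000, true));  /* 0 */
            printf("complete: %d\n", q.meat == q.len);              /* 1 */
            return 0;
    }
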
398 static bool ip_frag_coalesce_ok(const struct ipq *qp)
400 return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
404 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
408 struct net *net = qp->q.fqdir->net;
414 inet_frag_kill(&qp->q, refs);
416 ecn = ip_frag_ecn_table[qp->ecn];
423 reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
427 len = ip_hdrlen(skb) + qp->q.len;
432 inet_frag_reasm_finish(&qp->q, skb, reasm_data,
433 ip_frag_coalesce_ok(qp));
436 IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
450 if (qp->max_df_size == qp->q.max_size) {
460 qp->q.rb_fragments = RB_ROOT;
461 qp->q.fragments_tail = NULL;
462 qp->q.last_run_head = NULL;
466 net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
470 net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
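
ip_frag_reasm() (lines 404-470) also settles ECN for the rebuilt packet: qp->ecn accumulates one bit per ECN codepoint seen across the fragments (the OR at line 352), and ip_frag_ecn_table maps that mask to a final codepoint, with 0xff meaning the mix is invalid and the datagram must be dropped. The fold below follows my reading of the RFC 6040 reassembly rules, not the kernel table verbatim:

    #include <stdint.h>
    #include <stdio.h>

    /* One bit per ECN codepoint observed across the fragments,
     * in the spirit of the kernel's IPFRAG_ECN_* flags. */
    #define SAW_NOT_ECT 0x1
    #define SAW_ECT_1   0x2
    #define SAW_ECT_0   0x4
    #define SAW_CE      0x8

    #define ECN_DROP 0xff   /* sentinel: invalid mix, drop the datagram */

    /* Fold the observed-codepoint mask into a final codepoint: any mix
     * of not-ECT with ECT/CE is invalid; CE wins among ECT-capable
     * fragments. */
    static uint8_t ecn_fold(uint8_t mask)
    {
            if ((mask & SAW_NOT_ECT) && (mask & ~SAW_NOT_ECT))
                    return ECN_DROP;
            if (mask & SAW_CE)
                    return 3;           /* CE     */
            if (mask & SAW_ECT_0)
                    return 2;           /* ECT(0) */
            if (mask & SAW_ECT_1)
                    return 1;           /* ECT(1) */
            return 0;                   /* not-ECT */
    }

    int main(void)
    {
            uint8_t mask = 0;
            mask |= SAW_ECT_0;          /* fragment 1 arrived ECT(0) */
            mask |= SAW_CE;             /* fragment 2 arrived CE     */
            printf("final ecn: %u\n", ecn_fold(mask));               /* 3   */
            printf("bad mix: %u\n", ecn_fold(SAW_NOT_ECT | SAW_CE)); /* 255 */
            return 0;
    }
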
480 struct ipq *qp;
489 qp = ip_find(net, ip_hdr(skb), user, vif);
490 if (qp) {
493 spin_lock(&qp->q.lock);
495 ret = ip_frag_queue(qp, skb, &refs);
497 spin_unlock(&qp->q.lock);
499 inet_frag_putn(&qp->q, refs);
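
ip_defrag() (lines 480-499) is the entry point tying it together: find or create the queue for this fragment's key, take the queue lock, run ip_frag_queue(), unlock, then drop the references accumulated along the way with inet_frag_putn(). A toy userspace rendering of that find/lock/enqueue/unlock/put shape (hashing and the real lookup key are omitted; names are mine):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct queue {
            pthread_mutex_t lock;
            atomic_int      refcnt;
            int             meat;
    };

    /* Lookup stand-in: return the queue with a reference held, as
     * the kernel's ip_find() does. A real version would hash on the
     * (saddr, daddr, id, protocol, user, vif) key. */
    static struct queue *find(struct queue *global)
    {
            atomic_fetch_add(&global->refcnt, 1);
            return global;
    }

    /* inet_frag_putn() analogue: drop the refs taken on this path. */
    static void put(struct queue *q, int refs)
    {
            if (atomic_fetch_sub(&q->refcnt, refs) == refs)
                    printf("last reference gone, queue would be freed\n");
    }

    static void defrag(struct queue *global, int frag_len)
    {
            struct queue *q = find(global);
            int refs = 1;

            pthread_mutex_lock(&q->lock);
            q->meat += frag_len;        /* ip_frag_queue() analogue */
            pthread_mutex_unlock(&q->lock);

            put(q, refs);
    }

    int main(void)
    {
            struct queue q = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };
            defrag(&q, 1480);
            printf("meat=%d refcnt=%d\n", q.meat, atomic_load(&q.refcnt));
            return 0;
    }
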