Lines matching full:flow in net/sched/sch_fq_codel.c
From the design notes in the file's header comment:

 * Each flow has a CoDel managed queue.
 * For a given flow, packets are not reordered (CoDel uses a FIFO).
 * Low memory footprint (64 bytes per flow).
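
For orientation, the per-flow state that the matches below manipulate is small. This is a sketch reconstructed only from the fields referenced in the hits (head/tail for the FIFO, flowchain and deficit for the DRR lists, cvars for CoDel), not copied from the kernel headers:

        struct fq_codel_flow {
                struct sk_buff    *head;      /* oldest queued packet, FIFO front */
                struct sk_buff    *tail;      /* newest queued packet, tail add */
                struct list_head  flowchain;  /* links into new_flows / old_flows */
                int               deficit;    /* DRR byte credit */
                struct codel_vars cvars;      /* per-flow CoDel state */
        };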
In dequeue_head():

        static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
        {
                struct sk_buff *skb = flow->head;

                flow->head = skb->next;
                skb->next = NULL;       /* detach skb from the flow's FIFO */
                return skb;
        }
In flow_queue_add():

        /* add skb to flow queue (tail add) */
        static inline void flow_queue_add(struct fq_codel_flow *flow,
                                          struct sk_buff *skb)
        {
                if (flow->head == NULL)
                        flow->head = skb;
                else
                        flow->tail->next = skb;
                flow->tail = skb;
                skb->next = NULL;
        }
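
Taken together, dequeue_head() and flow_queue_add() implement a plain singly linked FIFO, which is why packets within a flow are never reordered. A minimal userspace model of the same list discipline (toy pkt/flowq types, not the kernel's sk_buff):

        #include <assert.h>
        #include <stddef.h>

        struct pkt { struct pkt *next; int id; };
        struct flowq { struct pkt *head, *tail; };

        static void q_add(struct flowq *q, struct pkt *p)   /* mirrors flow_queue_add() */
        {
                p->next = NULL;
                if (q->head == NULL)
                        q->head = p;
                else
                        q->tail->next = p;
                q->tail = p;
        }

        static struct pkt *q_del(struct flowq *q)           /* mirrors dequeue_head() */
        {
                struct pkt *p = q->head;    /* caller checks head != NULL, as the kernel does */

                q->head = p->next;
                p->next = NULL;
                return p;
        }

        int main(void)
        {
                struct flowq q = { NULL, NULL };
                struct pkt a = { .id = 1 }, b = { .id = 2 };

                q_add(&q, &a);
                q_add(&q, &b);
                assert(q_del(&q)->id == 1);     /* FIFO: first in, first out */
                assert(q_del(&q)->id == 2);
                return 0;
        }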
In fq_codel_drop():

        struct fq_codel_flow *flow;
        ...
        /* Queue is full! Find the fat flow and drop packet(s) from it.
         * ...
         * In stress mode, we'll try to drop 64 packets from the flow,
         * amortizing this linear lookup to one cache line per drop.
         */
        ...
        /* Our goal is to drop half of this fat flow backlog */
        ...
        flow = &q->flows[idx];
        ...
        skb = dequeue_head(flow);
        ...
        flow->cvars.count += i;
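
A sketch of the fat-flow search those comments describe, assuming (from the hits above) that q->backlogs[] holds per-flow byte counts; the helper name fattest_flow is made up for illustration:

        /* Return the index of the flow with the largest byte backlog.
         * With the default 1024 flows this reads a few KB of counters
         * linearly, which is cheap compared to maintaining a heap or
         * tree on the enqueue/dequeue fast path.
         */
        static unsigned int fattest_flow(const unsigned int *backlogs,
                                         unsigned int flows_cnt)
        {
                unsigned int maxbacklog = 0, idx = 0, i;

                for (i = 0; i < flows_cnt; i++) {
                        if (backlogs[i] > maxbacklog) {
                                maxbacklog = backlogs[i];
                                idx = i;
                        }
                }
                return idx;     /* caller then drops half of this flow's backlog */
        }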
In fq_codel_enqueue():

        struct fq_codel_flow *flow;
        ...
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        ...
        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                ...
                flow->deficit = q->quantum;
        }
        ...
        /* ... in q->backlogs[] to find a fat flow. ... */
        ...
        /* ... If we dropped a packet for this flow, return NET_XMIT_CN, ... */
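
The list_empty() test above is how an idle flow is detected: it is appended to new_flows and granted one quantum of deficit, letting new flows transmit ahead of long-running ones. A toy model of just that activation step (on_list stands in for the flowchain membership test):

        #include <stdbool.h>

        struct toy_flow {
                bool on_list;   /* stands in for !list_empty(&flow->flowchain) */
                int deficit;
        };

        /* Toy model of the activation step in fq_codel_enqueue(): an idle
         * flow joins the tail of new_flows with a full quantum of credit.
         */
        static void activate_if_idle(struct toy_flow *flow, int quantum)
        {
                if (!flow->on_list) {
                        flow->on_list = true;   /* list_add_tail(..., &q->new_flows) */
                        flow->deficit = quantum;
                }
        }

The NET_XMIT_CN comment covers the overflow case: when enqueueing pushed the qdisc over its limit and the resulting drop hit this very flow, the caller is told the packet was lost to congestion rather than queued.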
In dequeue_func():

        struct fq_codel_flow *flow;
        ...
        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                ...
        }
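
dequeue_func() receives only a pointer to the flow's embedded codel_vars and recovers the enclosing flow with container_of(); the backlog update then uses pointer subtraction (flow - q->flows) as the flow's array index. A self-contained illustration of the container_of() trick with toy types:

        #include <assert.h>
        #include <stddef.h>

        /* Simplified userspace version of the kernel's container_of() */
        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct codel_vars { int count; };
        struct toy_flow { int id; struct codel_vars cvars; };

        int main(void)
        {
                struct toy_flow f = { .id = 7 };

                /* Same recovery as dequeue_func(): from the embedded
                 * cvars member back to the flow that contains it.
                 */
                assert(container_of(&f.cvars, struct toy_flow, cvars)->id == 7);
                return 0;
        }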
In fq_codel_dequeue():

        struct fq_codel_flow *flow;
        ...
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }
        skb = codel_dequeue(..., &flow->cvars, &q->cstats, qdisc_pkt_len,
                            ...);
        if (!skb) {
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        ...
        flow->deficit -= qdisc_pkt_len(skb);
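
The deficit handling above is classic DRR: a flow whose credit is exhausted is topped up with one quantum and rotated to the back of old_flows, and each dequeued packet is charged against the credit. A toy model of that bookkeeping (names are illustrative; the real code charges the deficit only after codel_dequeue() actually returns a packet):

        #include <stdbool.h>

        struct drr_flow { int deficit; };

        /* One DRR scheduling decision, mirroring the checks in
         * fq_codel_dequeue(): no credit -> refill and rotate;
         * otherwise send and charge the packet's bytes.
         */
        static bool drr_can_send(struct drr_flow *flow, int pkt_len,
                                 int quantum, bool *rotate_to_old)
        {
                if (flow->deficit <= 0) {
                        flow->deficit += quantum;   /* top up once per round */
                        *rotate_to_old = true;      /* tail of old_flows */
                        return false;
                }
                flow->deficit -= pkt_len;           /* charge dequeued bytes */
                *rotate_to_old = false;
                return true;
        }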
In fq_codel_flow_purge():

        static void fq_codel_flow_purge(struct fq_codel_flow *flow)
        {
                rtnl_kfree_skbs(flow->head, flow->tail);
                flow->head = NULL;
        }
In fq_codel_reset():

        for (i = 0; i < q->flows_cnt; i++) {
                struct fq_codel_flow *flow = q->flows + i;

                fq_codel_flow_purge(flow);
                INIT_LIST_HEAD(&flow->flowchain);
                codel_vars_init(&flow->cvars);
        }
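
With the toy queue from the earlier sketch, the purge step amounts to walking the head..tail chain and resetting the pointers (free_pkt is a hypothetical destructor; the kernel instead hands the whole chain to rtnl_kfree_skbs() in one call):

        struct pkt { struct pkt *next; };
        struct flowq { struct pkt *head, *tail; };

        static void q_purge(struct flowq *q, void (*free_pkt)(struct pkt *))
        {
                struct pkt *p = q->head;

                while (p) {
                        struct pkt *next = p->next;

                        free_pkt(p);
                        p = next;
                }
                /* The kernel resets only head; a stale tail is harmless
                 * because flow_queue_add() overwrites it when head is NULL.
                 */
                q->head = q->tail = NULL;
        }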
In fq_codel_init():

        for (i = 0; i < q->flows_cnt; i++) {
                struct fq_codel_flow *flow = q->flows + i;

                INIT_LIST_HEAD(&flow->flowchain);
                codel_vars_init(&flow->cvars);
        }
In fq_codel_dump_class_stats():

        const struct fq_codel_flow *flow = &q->flows[idx];
        ...
        xstats.class_stats.deficit = flow->deficit;
        xstats.class_stats.ldelay =
                codel_time_to_us(flow->cvars.ldelay);
        xstats.class_stats.count = flow->cvars.count;
        xstats.class_stats.lastcount = flow->cvars.lastcount;
        xstats.class_stats.dropping = flow->cvars.dropping;
        if (flow->cvars.dropping) {
                codel_tdiff_t delta = flow->cvars.drop_next -
                                      codel_get_time();
                ...
        }
        ...
        if (flow->head) {
                ...
                skb = flow->head;
                ...
        }
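
The class stats expose both the CoDel state (count, lastcount, dropping, time until drop_next) and the instantaneous queue, which is computed by walking flow->head. A toy version of that final walk (the len field stands in for qdisc_pkt_len()):

        struct pkt { struct pkt *next; unsigned int len; };

        /* Mirror of the final loop in fq_codel_dump_class_stats():
         * derive queue length and byte backlog by walking the FIFO.
         */
        static void flow_queue_stats(const struct pkt *head,
                                     unsigned int *qlen, unsigned int *backlog)
        {
                *qlen = 0;
                *backlog = 0;
                for (; head; head = head->next) {
                        (*qlen)++;
                        *backlog += head->len;
                }
        }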