// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_skbprio.c  SKB Priority Queue.
 *
 * Authors:	Nishanth Devarajan, <[email protected]>
 *		Cody Doucette, <[email protected]>
 *		original idea by Michel Machado, Cody Doucette, and Qiaobin Fu
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/inet_ecn.h>

/*		SKB Priority Queue
 *	=================================
 *
 * Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes
 * packets according to their skb->priority field. Under congestion,
 * Skbprio drops already-enqueued lower priority packets to make space
 * available for higher priority packets; it was conceived as a building
 * block for denial-of-service defenses that need to route packets with
 * different priorities in order to withstand DoS attacks.
 */
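
/* Illustrative configuration (assuming iproute2's tc supports the skbprio
 * qdisc; the limit is a packet count and defaults to 64):
 *
 *	tc qdisc add dev eth0 root handle 1: skbprio limit 1000
 *
 * skb->priority is typically set by applications through the SO_PRIORITY
 * socket option or by tc actions (e.g. skbedit); values above
 * SKBPRIO_MAX_PRIORITY - 1 are clamped on enqueue.
 */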

struct skbprio_sched_data {
	/* Queue state. */
	struct sk_buff_head qdiscs[SKBPRIO_MAX_PRIORITY];
	struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY];
	u16 highest_prio;
	u16 lowest_prio;
};

static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
{
	int prio;

	for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) {
		if (!skb_queue_empty(&q->qdiscs[prio]))
			return prio;
	}

	/* SKB queue is empty, return 0 (default highest priority setting). */
	return 0;
}

static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
{
	int prio;

	for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) {
		if (!skb_queue_empty(&q->qdiscs[prio]))
			return prio;
	}

	/* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1
	 * (default lowest priority setting).
	 */
	return SKBPRIO_MAX_PRIORITY - 1;
}

static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;
	struct skbprio_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *qdisc;
	struct sk_buff_head *lp_qdisc;
	struct sk_buff *to_drop;
	u16 prio, lp;

	/* Obtain the priority of @skb. */
	prio = min(skb->priority, max_priority);

	qdisc = &q->qdiscs[prio];

	/* sch->limit can change under us from skbprio_change() */
	if (sch->q.qlen < READ_ONCE(sch->limit)) {
		__skb_queue_tail(qdisc, skb);
		qdisc_qstats_backlog_inc(sch, skb);
		q->qstats[prio].backlog += qdisc_pkt_len(skb);

		/* Check to update highest and lowest priorities. */
		if (prio > q->highest_prio)
			q->highest_prio = prio;

		if (prio < q->lowest_prio)
			q->lowest_prio = prio;

		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}

	/* If this packet has the lowest priority, drop it. */
	lp = q->lowest_prio;
	if (prio <= lp) {
		q->qstats[prio].drops++;
		q->qstats[prio].overlimits++;
		return qdisc_drop(skb, sch, to_free);
	}

	__skb_queue_tail(qdisc, skb);
	qdisc_qstats_backlog_inc(sch, skb);
	q->qstats[prio].backlog += qdisc_pkt_len(skb);

	/* Drop the packet at the tail of the lowest priority qdisc. */
	lp_qdisc = &q->qdiscs[lp];
	to_drop = __skb_dequeue_tail(lp_qdisc);
	BUG_ON(!to_drop);
	qdisc_qstats_backlog_dec(sch, to_drop);
	qdisc_drop(to_drop, sch, to_free);

	q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);
	q->qstats[lp].drops++;
	q->qstats[lp].overlimits++;

	/* Check to update highest and lowest priorities. */
	if (skb_queue_empty(lp_qdisc)) {
		if (q->lowest_prio == q->highest_prio) {
			q->lowest_prio = prio;
			q->highest_prio = prio;
		} else {
			q->lowest_prio = calc_new_low_prio(q);
		}
	}

	if (prio > q->highest_prio)
		q->highest_prio = prio;

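	/* The new packet was admitted, but a lower priority packet was
	 * dropped to make room for it, so report congestion to the caller.
	 */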
	return NET_XMIT_CN;
}

static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio];
	struct sk_buff *skb = __skb_dequeue(hpq);

	if (unlikely(!skb))
		return NULL;

	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);

	/* Update highest priority field. */
	if (skb_queue_empty(hpq)) {
		if (q->lowest_prio == q->highest_prio) {
			q->highest_prio = 0;
			q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
		} else {
			q->highest_prio = calc_new_high_prio(q);
		}
	}
	return skb;
}

static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct tc_skbprio_qopt *ctl = nla_data(opt);

	if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	WRITE_ONCE(sch->limit, ctl->limit);
	return 0;
}

static int skbprio_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	int prio;

	/* Initialise all queues, one for each possible priority. */
	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
		__skb_queue_head_init(&q->qdiscs[prio]);

	memset(&q->qstats, 0, sizeof(q->qstats));
	q->highest_prio = 0;
	q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
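	/* Default limit, in packets; overridden below if a limit was supplied. */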
	sch->limit = 64;
	if (!opt)
		return 0;

	return skbprio_change(sch, opt, extack);
}

static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tc_skbprio_qopt opt;

	opt.limit = READ_ONCE(sch->limit);

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		return -1;

	return skb->len;
}

static void skbprio_reset(struct Qdisc *sch)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
		__skb_queue_purge(&q->qdiscs[prio]);

	memset(&q->qstats, 0, sizeof(q->qstats));
	q->highest_prio = 0;
	q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
}

static void skbprio_destroy(struct Qdisc *sch)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
		__skb_queue_purge(&q->qdiscs[prio]);
}

static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long skbprio_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
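
	/* Class IDs are 1-based: class cl corresponds to priority cl - 1. */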
	if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1],
		q->qstats[cl - 1].qlen) < 0)
		return -1;
	return 0;
}

static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) {
		if (!tc_qdisc_stats_dump(sch, i + 1, arg))
			break;
	}
}

static const struct Qdisc_class_ops skbprio_class_ops = {
	.leaf		=	skbprio_leaf,
	.find		=	skbprio_find,
	.dump		=	skbprio_dump_class,
	.dump_stats	=	skbprio_dump_class_stats,
	.walk		=	skbprio_walk,
};

static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
	.cl_ops		=	&skbprio_class_ops,
	.id		=	"skbprio",
	.priv_size	=	sizeof(struct skbprio_sched_data),
	.enqueue	=	skbprio_enqueue,
	.dequeue	=	skbprio_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	skbprio_init,
	.reset		=	skbprio_reset,
	.change		=	skbprio_change,
	.dump		=	skbprio_dump,
	.destroy	=	skbprio_destroy,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("skbprio");

static int __init skbprio_module_init(void)
{
	return register_qdisc(&skbprio_qdisc_ops);
}

static void __exit skbprio_module_exit(void)
{
	unregister_qdisc(&skbprio_qdisc_ops);
}

module_init(skbprio_module_init)
module_exit(skbprio_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SKB priority based scheduling qdisc");