// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2023 Marvell.
 *
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/bitfield.h>

#include "otx2_common.h"
#include "cn10k.h"
#include "qos.h"

#define OTX2_QOS_QID_INNER		0xFFFFU
#define OTX2_QOS_QID_NONE		0xFFFEU
#define OTX2_QOS_ROOT_CLASSID		0xFFFFFFFF
#define OTX2_QOS_CLASS_NONE		0
#define OTX2_QOS_DEFAULT_PRIO		0xF
#define OTX2_QOS_INVALID_SQ		0xFFFF
#define OTX2_QOS_INVALID_TXSCHQ_IDX	0xFFFF
#define CN10K_MAX_RR_WEIGHT		GENMASK_ULL(13, 0)
#define OTX2_MAX_RR_QUANTUM		GENMASK_ULL(23, 0)

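/* Resize the netdev's real Tx queue count to the regular Tx queues
 * plus every QoS send queue currently set in qos_sq_bmap.
 */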
static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int tx_queues, qos_txqs, err;

	qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
				 OTX2_QOS_MAX_LEAF_NODES);

	tx_queues = hw->tx_queues + qos_txqs;

	err = netif_set_real_num_tx_queues(pfvf->netdev, tx_queues);
	if (err) {
		netdev_err(pfvf->netdev,
			   "Failed to set number of Tx queues: %d\n", tx_queues);
		return;
	}
}

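/* Fill cfg->reg[] from @index onwards with the PARENT, SCHEDULE, PIR
 * and CIR register offsets that match the node's scheduler level.
 */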
static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
				 struct nix_txschq_config *cfg,
				 int index)
{
	if (node->level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_MDQX_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL4) {
		cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_TL4X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL3) {
		cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_TL3X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL2) {
		cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq);
		cfg->reg[index]   = NIX_AF_TL2X_CIR(node->schq);
	}
}

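/* Convert an HTB quantum in bytes to a hardware DWRR weight, rounding
 * up to the next multiple of the DWRR MTU.
 */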
static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum)
{
	u32 weight;

	weight = quantum / pfvf->hw.dwrr_mtu;
	if (quantum % pfvf->hw.dwrr_mtu)
		weight += 1;

	return weight;
}

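/* Fill in the register values for a node's parent linkage, its
 * static priority or DWRR weight, and its PIR/CIR shaping rates,
 * advancing *num_regs past the entries written.
 */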
static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node,
				      struct nix_txschq_config *cfg,
				      int *num_regs)
{
	u32 rr_weight;
	u32 quantum;
	u64 maxrate;

	otx2_qos_get_regaddr(node, cfg, *num_regs);

	/* configure parent txschq */
	cfg->regval[*num_regs] = node->parent->schq << 16;
	(*num_regs)++;

	/* configure prio/quantum */
	if (node->qid == OTX2_QOS_QID_NONE) {
		cfg->regval[*num_regs] = node->prio << 24 |
					 mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
		(*num_regs)++;
		return;
	}

	/* configure priority/quantum */
	if (node->is_static) {
		cfg->regval[*num_regs] =
			(node->schq - node->parent->prio_anchor) << 24;
	} else {
		quantum = node->quantum ?
			  node->quantum : pfvf->tx_max_pktlen;
		rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
		cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
					 rr_weight;
	}
	(*num_regs)++;

	/* configure PIR */
	maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, maxrate, 65536);
	(*num_regs)++;

	/* Don't configure CIR when both CIR and PIR are not supported;
	 * on 96xx, CIR + PIR + RED_ALGO=STALL causes a deadlock.
	 */
	if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag))
		return;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, node->rate, 65536);
	(*num_regs)++;
}

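/* Build the complete register/value list for one scheduler queue,
 * dispatching on its level (SMQ, TL4, TL3 or TL2).
 */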
static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  struct nix_txschq_config *cfg)
{
	struct otx2_hw *hw = &pfvf->hw;
	int num_regs = 0;
	u8 level;

	level = node->level;

	/* program txschq registers */
	if (level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq);
		cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) |
					OTX2_MIN_MTU;
		cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) |
					 (0x2ULL << 36);
		num_regs++;

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL4) {
		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL3) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL2) {
		/* configure parent txschq */
		cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
		cfg->regval[num_regs] = (u64)hw->tx_link << 16;
		num_regs++;

		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		/* check if node is root */
		if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
			cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
			cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
						mtu_to_dwrr_weight(pfvf,
								   pfvf->tx_max_pktlen);
			num_regs++;
			goto txschq_cfg_out;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	}

txschq_cfg_out:
	cfg->num_regs = num_regs;
}

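/* Program the parent's TOPOLOGY register with its priority anchor and
 * the round-robin priority used by its DWRR children. MDQ is the
 * lowest level and has no child topology to configure.
 */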
static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
					       struct otx2_qos_node *parent)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *cfg;
	int rc;

	if (parent->level == NIX_TXSCH_LVL_MDQ)
		return 0;

	mutex_lock(&mbox->lock);

	cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!cfg) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	cfg->lvl = parent->level;

	if (parent->level == NIX_TXSCH_LVL_TL4)
		cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL3)
		cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL2)
		cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL1)
		cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);

	cfg->regval[0] = (u64)parent->prio_anchor << 32;
	cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
			   parent->child_dwrr_prio : 0) << 1;
	cfg->num_regs++;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node;

	list_for_each_entry_reverse(node, &parent->child_schq_list, list)
		otx2_txschq_free_one(pfvf, node->level, node->schq);
}

static void otx2_qos_free_hw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		otx2_qos_free_hw_node(pfvf, node);
		otx2_qos_free_hw_node_schq(pfvf, node);
		otx2_txschq_free_one(pfvf, node->level, node->schq);
	}
}

static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf,
				 struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	/* free child node hw mappings */
	otx2_qos_free_hw_node(pfvf, node);
	otx2_qos_free_hw_node_schq(pfvf, node);

	/* free node hw mappings */
	otx2_txschq_free_one(pfvf, node->level, node->schq);

	mutex_unlock(&pfvf->qos.qos_lock);
}

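/* Unlink a software node from the hash table and tree and free it,
 * returning its qid (if it owns one) to the SQ bitmap and shrinking
 * the netdev Tx queue count.
 */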
static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	hash_del_rcu(&node->hlist);

	if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
		__clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
		otx2_qos_update_tx_netdev_queues(pfvf);
	}

	list_del(&node->list);
	kfree(node);
}

static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) {
		list_del(&node->list);
		kfree(node);
	}
}

static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				    struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		__otx2_qos_free_sw_node(pfvf, node);
		otx2_qos_free_sw_node_schq(pfvf, node);
		otx2_qos_sw_node_delete(pfvf, node);
	}
}

static void otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	__otx2_qos_free_sw_node(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	otx2_qos_sw_node_delete(pfvf, node);

	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_destroy_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	otx2_qos_free_hw_cfg(pfvf, node);
	otx2_qos_free_sw_node(pfvf, node);
}

static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent,
				   struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_schq_list, list)
		cfg->schq[node->level]++;
}

static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
				 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_fill_cfg_tl(node, cfg);
		otx2_qos_fill_cfg_schq(node, cfg);
	}

	/* Assign the required number of transmit scheduler queues under
	 * the given class.
	 */
	cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
					       parent->max_static_prio + 1;
}

static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
					struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_fill_cfg_tl(parent, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[node->level];
		cfg->schq_list[node->level][cnt] = node->schq;
		cfg->schq[node->level]++;
		cfg->dwrr_node_pos[node->level]++;
	}
}

static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_read_txschq_cfg_tl(node, cfg);
		cnt = cfg->static_node_pos[node->level];
		cfg->schq_contig_list[node->level][cnt] = node->schq;
		cfg->schq_index_used[node->level][cnt] = true;
		cfg->schq_contig[node->level]++;
		cfg->static_node_pos[node->level]++;
		otx2_qos_read_txschq_cfg_schq(node, cfg);
	}
}

static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_read_txschq_cfg_tl(node, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

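/* Allocate the root of the HTB hierarchy: TL1 when running on a PF,
 * TL2 on a VF.
 */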
static struct otx2_qos_node *
otx2_qos_alloc_root(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = NULL;
	if (!is_otx2_vf(pfvf->pcifunc)) {
		node->level = NIX_TXSCH_LVL_TL1;
	} else {
		node->level = NIX_TXSCH_LVL_TL2;
		node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	}

	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
	node->classid = OTX2_QOS_ROOT_CLASSID;

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid);
	list_add_tail(&node->list, &pfvf->qos.qos_tree);
	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	return node;
}

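/* Insert @node into the parent's child list, kept sorted by priority;
 * fail if a static node already occupies the same priority.
 */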
static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
				   struct otx2_qos_node *node)
{
	struct list_head *head = &parent->child_list;
	struct otx2_qos_node *tmp_node;
	struct list_head *tmp;

	if (node->prio > parent->max_static_prio)
		parent->max_static_prio = node->prio;

	for (tmp = head->next; tmp != head; tmp = tmp->next) {
		tmp_node = list_entry(tmp, struct otx2_qos_node, list);
		if (tmp_node->prio == node->prio &&
		    tmp_node->is_static)
			return -EEXIST;
		if (tmp_node->prio > node->prio) {
			list_add_tail(&node->list, tmp);
			return 0;
		}
	}

	list_add_tail(&node->list, head);
	return 0;
}

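/* Allocate the chain of pass-through scheduler nodes from one level
 * below @node down to MDQ and link them on node->child_schq_list.
 */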
static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node)
{
	struct otx2_qos_node *txschq_node, *parent, *tmp;
	int lvl;

	parent = node;
	for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) {
		txschq_node = kzalloc(sizeof(*txschq_node), GFP_KERNEL);
		if (!txschq_node)
			goto err_out;

		txschq_node->parent = parent;
		txschq_node->level = lvl;
		txschq_node->classid = OTX2_QOS_CLASS_NONE;
		WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);
		txschq_node->rate = 0;
		txschq_node->ceil = 0;
		txschq_node->prio = 0;
		txschq_node->quantum = 0;
		txschq_node->is_static = true;
		txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

		mutex_lock(&pfvf->qos.qos_lock);
		list_add_tail(&txschq_node->list, &node->child_schq_list);
		mutex_unlock(&pfvf->qos.qos_lock);

		INIT_LIST_HEAD(&txschq_node->child_list);
		INIT_LIST_HEAD(&txschq_node->child_schq_list);
		parent = txschq_node;
	}

	return 0;

err_out:
	list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list,
				 list) {
		list_del(&txschq_node->list);
		kfree(txschq_node);
	}
	return -ENOMEM;
}

static struct otx2_qos_node *
otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
			     struct otx2_qos_node *parent,
			     u16 classid, u32 prio, u64 rate, u64 ceil,
			     u32 quantum, u16 qid, bool static_cfg)
{
	struct otx2_qos_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = parent;
	node->level = parent->level - 1;
	node->classid = classid;
	WRITE_ONCE(node->qid, qid);

	node->rate = otx2_convert_rate(rate);
	node->ceil = otx2_convert_rate(ceil);
	node->prio = prio;
	node->quantum = quantum;
	node->is_static = static_cfg;
	node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid);

	mutex_lock(&pfvf->qos.qos_lock);
	err = otx2_qos_add_child_node(parent, node);
	if (err) {
		mutex_unlock(&pfvf->qos.qos_lock);
		return ERR_PTR(err);
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	err = otx2_qos_alloc_txschq_node(pfvf, node);
	if (err) {
		otx2_qos_sw_node_delete(pfvf, node);
		return ERR_PTR(-ENOMEM);
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid)
{
	struct otx2_qos_node *node = NULL;
	int bkt;

	hash_for_each(pfvf->qos.qos_hlist, bkt, node, hlist) {
		if (node->qid == qid)
			break;
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid)
{
	struct otx2_qos_node *node;
	u16 qid;
	int res;

	node = otx2_sw_node_find_rcu(pfvf, classid);
	if (!node) {
		res = -ENOENT;
		goto out;
	}
	qid = READ_ONCE(node->qid);
	if (qid == OTX2_QOS_QID_INNER) {
		res = -EINVAL;
		goto out;
	}
	res = pfvf->hw.tx_queues + qid;
out:
	return res;
}

static int
otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *req;
	int rc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	req->lvl = node->level;
	__otx2_qos_txschq_cfg(pfvf, node, req);

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

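/* Ask the AF, via mailbox, for the scheduler queues counted in @cfg
 * and record the IDs it hands back in the cfg's schq lists.
 */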
static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
				 struct otx2_qos_cfg *cfg)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	struct mbox *mbox = &pfvf->mbox;
	int lvl, rc, schq;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		req->schq[lvl] = cfg->schq[lvl];
		req->schq_contig[lvl] = cfg->schq_contig[lvl];
	}

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc) {
		mutex_unlock(&mbox->lock);
		return rc;
	}

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

	if (IS_ERR(rsp)) {
		rc = PTR_ERR(rsp);
		goto out;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) {
			cfg->schq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq[lvl]; schq++) {
			cfg->schq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
		}
	}

	pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
	pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;

out:
	mutex_unlock(&mbox->lock);
	return rc;
}

static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf,
					struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (!cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
					  struct otx2_qos_node *node,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[tmp->level];
		tmp->schq = cfg->schq_list[tmp->level][cnt];
		cfg->dwrr_node_pos[tmp->level]++;
	}
}

static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
					struct otx2_qos_node *node,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_list, list) {
		otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg);
		cnt = cfg->static_node_pos[tmp->level];
		tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
		cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
		if (cnt == 0)
			node->prio_anchor =
				cfg->schq_contig_list[tmp->level][0];
		cfg->static_node_pos[tmp->level]++;
		otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg);
	}
}

static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
	otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
	otx2_qos_free_unused_txschq(pfvf, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

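/* Pick a free slot in the parent's contiguous child index range:
 * static nodes claim the slot matching their priority 1:1, DWRR
 * nodes take the first free slot at or above the DWRR priority.
 */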
static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
					  struct otx2_qos_node *tmp,
					  unsigned long *child_idx_bmap,
					  int child_cnt)
{
	int idx;

	if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
		return;

	/* assign static nodes 1:1 prio mapping first, then remaining nodes */
	for (idx = 0; idx < child_cnt; idx++) {
		if (tmp->is_static && tmp->prio == idx &&
		    !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		} else if (!tmp->is_static && idx >= tmp->prio &&
			   !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		}
	}
}

static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node)
{
	unsigned long *child_idx_bmap;
	struct otx2_qos_node *tmp;
	int child_cnt;

	list_for_each_entry(tmp, &node->child_list, list)
		tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	/* allocate child index array */
	child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
	child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
				 sizeof(unsigned long),
				 GFP_KERNEL);
	if (!child_idx_bmap)
		return -ENOMEM;

	list_for_each_entry(tmp, &node->child_list, list)
		otx2_qos_assign_base_idx_tl(pfvf, tmp);

	/* assign base index of static priority children first */
	list_for_each_entry(tmp, &node->child_list, list) {
		if (!tmp->is_static)
			continue;
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);
	}

	/* assign base index of dwrr priority children */
	list_for_each_entry(tmp, &node->child_list, list)
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);

	kfree(child_idx_bmap);

	return 0;
}

static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	int ret = 0;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_assign_base_idx_tl(pfvf, node);
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}

static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_set_parent_topology(pfvf, tmp->parent);
		if (ret)
			return -EIO;
	}

	return 0;
}

static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node,
				       struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_list, list) {
		ret = otx2_qos_txschq_push_cfg_tl(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_push_cfg_schq(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
	}

	ret = otx2_qos_txschq_set_parent_topology(pfvf, node);
	if (ret)
		return -EIO;

	return 0;
}

static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg);
	if (ret)
		goto out;
	ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg);
out:
	mutex_unlock(&pfvf->qos.qos_lock);
	return ret;
}

static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	otx2_qos_txschq_fill_cfg(pfvf, node, cfg);

	return otx2_qos_txschq_push_cfg(pfvf, node, cfg);
}

static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf,
					   struct otx2_qos_node *root,
					   struct otx2_qos_cfg *cfg)
{
	root->schq = cfg->schq_list[root->level][0];
	return otx2_qos_txschq_config(pfvf, root);
}

static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq[lvl]; idx++) {
			schq = cfg->schq_list[lvl][idx];
			otx2_txschq_free_one(pfvf, lvl, schq);
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

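/* Point a QoS send queue at the node's scheduler queue: tear down any
 * stale mapping, program the txschq and re-enable the SQ.
 */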
static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
			       struct otx2_qos_node *node,
			       u16 qid)
{
	if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ)
		otx2_qos_disable_sq(pfvf, qid);

	pfvf->qos.qid_to_sqmap[qid] = node->schq;
	otx2_qos_txschq_config(pfvf, node);
	otx2_qos_enable_sq(pfvf, qid);
}

static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     bool action)
{
	struct otx2_qos_node *tmp;

	if (node->qid == OTX2_QOS_QID_INNER)
		return;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, node->qid);
		}
	}
}

static void __otx2_qos_update_smq(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  bool action)
{
	struct otx2_qos_node *tmp;

	list_for_each_entry(tmp, &node->child_list, list) {
		__otx2_qos_update_smq(pfvf, tmp, action);
		if (tmp->qid == OTX2_QOS_QID_INNER)
			continue;
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, tmp->qid);
		} else {
			otx2_qos_update_smq_schq(pfvf, tmp, action);
		}
	}
}

static void otx2_qos_update_smq(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				bool action)
{
	mutex_lock(&pfvf->qos.qos_lock);
	__otx2_qos_update_smq(pfvf, node, action);
	otx2_qos_update_smq_schq(pfvf, node, action);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	ret = otx2_qos_txschq_alloc(pfvf, cfg);
	if (ret)
		return -ENOSPC;

	ret = otx2_qos_assign_base_idx(pfvf, node);
	if (ret)
		return -ENOMEM;

	if (!(pfvf->netdev->flags & IFF_UP)) {
		otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
		return 0;
	}

	ret = otx2_qos_txschq_update_config(pfvf, node, cfg);
	if (ret) {
		otx2_qos_free_cfg(pfvf, cfg);
		return -EIO;
	}

	otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);

	return 0;
}

static int otx2_qos_update_tree(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				struct otx2_qos_cfg *cfg)
{
	otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg);
	return otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg);
}

static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *new_cfg;
	struct otx2_qos_node *root;
	int err;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n",
		   htb_maj_id, htb_defcls);

	root = otx2_qos_alloc_root(pfvf);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		return err;
	}

	/* allocate txschq queue */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		err = -ENOMEM;
		goto free_root_node;
	}
	/* allocate htb root node */
	new_cfg->schq[root->level] = 1;
	err = otx2_qos_txschq_alloc(pfvf, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq");
		goto free_root_node;
	}

	/* Update TL1 RR PRIO */
	if (root->level == NIX_TXSCH_LVL_TL1) {
		root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio;
		netdev_dbg(pfvf->netdev,
			   "TL1 DWRR Priority %d\n", root->child_dwrr_prio);
	}

	if (!(pfvf->netdev->flags & IFF_UP) ||
	    root->level == NIX_TXSCH_LVL_TL1) {
		root->schq = new_cfg->schq_list[root->level][0];
		goto out;
	}

	/* update the txschq configuration in hw */
	err = otx2_qos_txschq_update_root_cfg(pfvf, root, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Error updating txschq configuration");
		goto txschq_free;
	}

out:
	WRITE_ONCE(pfvf->qos.defcls, htb_defcls);
	/* Pairs with smp_load_acquire() in ndo_select_queue */
	smp_store_release(&pfvf->qos.maj_id, htb_maj_id);
	kfree(new_cfg);
	return 0;

txschq_free:
	otx2_qos_free_cfg(pfvf, new_cfg);
free_root_node:
	kfree(new_cfg);
	otx2_qos_sw_node_delete(pfvf, root);
	return err;
}

static int otx2_qos_root_destroy(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n");

	/* find root node */
	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return -ENOENT;

	/* free the hw mappings */
	otx2_qos_destroy_node(pfvf, root);

	return 0;
}

static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum)
{
	u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
	int err = 0;

	/* The maximum round-robin weight supported by octeontx2 and
	 * CN10K differs, so validate accordingly.
	 */
	if (is_dev_otx2(pfvf->pdev))
		err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0;
	else if (rr_weight > CN10K_MAX_RR_WEIGHT)
		err = -EINVAL;

	return err;
}

static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent,
				      struct netlink_ext_ack *extack,
				      struct otx2_nic *pfvf,
				      u64 prio, u64 quantum)
{
	int err;

	err = otx2_qos_validate_quantum(pfvf, quantum);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value");
		return err;
	}

	if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = prio;
	} else if (prio != parent->child_dwrr_prio) {
		NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_qos_validate_configuration(struct otx2_qos_node *parent,
					   struct netlink_ext_ack *extack,
					   struct otx2_nic *pfvf,
					   u64 prio, bool static_cfg)
{
	if (prio == parent->child_dwrr_prio && static_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists");
		return -EEXIST;
	}

	if (static_cfg && test_bit(prio, parent->prio_bmap)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Static priority child with same priority exists");
		return -EEXIST;
	}

	return 0;
}

static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)
{
	/* For PF, root node dwrr priority is static */
	if (parent->level == NIX_TXSCH_LVL_TL1)
		return;

	if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		clear_bit(prio, parent->prio_bmap);
	}
}

static bool is_qos_node_dwrr(struct otx2_qos_node *parent,
			     struct otx2_nic *pfvf,
			     u64 prio)
{
	struct otx2_qos_node *node;
	bool ret = false;

	if (parent->child_dwrr_prio == prio)
		return true;

	mutex_lock(&pfvf->qos.qos_lock);
	list_for_each_entry(node, &parent->child_list, list) {
		if (prio == node->prio) {
			if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO &&
			    parent->child_dwrr_prio != prio)
				continue;

			if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
				netdev_err(pfvf->netdev,
					   "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d",
					   node->classid, node->quantum,
					   node->prio);
				break;
			}
			/* mark old node as dwrr */
			node->is_static = false;
			parent->child_dwrr_cnt++;
			parent->child_static_cnt--;
			ret = true;
			break;
		}
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}

static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
				     u32 parent_classid, u64 rate, u64 ceil,
				     u64 prio, u32 quantum,
				     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *parent;
	int qid, ret, err;
	bool static_cfg;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",
		   classid, parent_classid, rate, ceil, prio, quantum);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* get parent node */
	parent = otx2_sw_node_find(pfvf, parent_classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		ret = -ENOENT;
		goto out;
	}
	if (parent->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(parent, pfvf, prio);
	ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio,
					      static_cfg);
	if (ret)
		goto out;

	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		parent->child_static_cnt++;
	else
		parent->child_dwrr_cnt++;

	set_bit(prio, parent->prio_bmap);

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, parent, old_cfg);

	/* allocate a new sq */
	qid = otx2_qos_get_qid(pfvf);
	if (qid < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Reached max supported QoS SQs");
		ret = -ENOMEM;
		goto free_old_cfg;
	}

	/* Actual SQ mapping will be updated after SMQ alloc */
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* allocate and initialize a new child node */
	node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
					    ceil, quantum, qid, static_cfg);
	if (IS_ERR(node)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(node);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, node, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, node);
		/* restore the old qos tree */
		err = otx2_qos_txschq_update_config(pfvf, parent, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}

		otx2_qos_update_smq(pfvf, parent, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return pfvf->hw.tx_queues + qid;

free_node:
	otx2_qos_sw_node_delete(pfvf, node);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		parent->child_static_cnt--;
	else
		parent->child_dwrr_cnt--;

	clear_bit(prio, parent->prio_bmap);
out:
	return ret;
}

static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
				  u16 child_classid, u64 rate, u64 ceil, u64 prio,
				  u32 quantum, struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *child;
	bool static_cfg;
	int ret, err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n",
		   classid, child_classid, rate, ceil);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		ret = -ENOENT;
		goto out;
	}
	/* check max qos txschq level */
	if (node->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		node->child_static_cnt++;
	else
		node->child_dwrr_cnt++;

	set_bit(prio, node->prio_bmap);

	/* store the qid to assign to leaf node */
	qid = node->qid;

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);

	/* delete the txschq nodes allocated for this node */
	otx2_qos_disable_sq(pfvf, qid);
	otx2_qos_free_hw_node_schq(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* mark this node as htb inner node */
	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);

	/* allocate and initialize a new child node */
	child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
					     prio, rate, ceil, quantum,
					     qid, static_cfg);
	if (IS_ERR(child)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(child);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, child, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, child);
		/* restore the old qos tree */
		WRITE_ONCE(node->qid, qid);
		err = otx2_qos_alloc_txschq_node(pfvf, node);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore old leaf node");
			goto free_old_cfg;
		}
		err = otx2_qos_txschq_update_config(pfvf, node, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}
		otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return 0;

free_node:
	otx2_qos_sw_node_delete(pfvf, child);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		node->child_static_cnt--;
	else
		node->child_dwrr_cnt--;
	clear_bit(prio, node->prio_bmap);
out:
	return ret;
}

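/* Return one past the highest qid currently in use, or 0 when no QoS
 * leaf queues are allocated.
 */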
static int otx2_qos_cur_leaf_nodes(struct otx2_nic *pfvf)
{
	int last = find_last_bit(pfvf->qos.qos_sq_bmap, pfvf->hw.tc_tx_queues);

	return last == pfvf->hw.tc_tx_queues ? 0 : last + 1;
}

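/* Drop any packets still queued on the qdisc attached to Tx queue
 * @qid before the queue is remapped.
 */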
static void otx2_reset_qdisc(struct net_device *dev, u16 qid)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);

	if (!qdisc)
		return;

	spin_lock_bh(qdisc_lock(qdisc));
	qdisc_reset(qdisc);
	spin_unlock_bh(qdisc_lock(qdisc));
}

static void otx2_cfg_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node,
			 int qid)
{
	struct otx2_qos_node *tmp;

	list_for_each_entry(tmp, &node->child_schq_list, list)
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			otx2_qos_txschq_config(pfvf, tmp);
			pfvf->qos.qid_to_sqmap[qid] = tmp->schq;
		}
}

static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	bool dwrr_del_node = false;
	u16 qid, moved_qid;
	u64 prio;

	netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, *classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}
	parent = node->parent;
	prio   = node->prio;
	qid    = node->qid;

	if (!node->is_static)
		dwrr_del_node = true;

	otx2_qos_disable_sq(pfvf, node->qid);

	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	moved_qid = otx2_qos_cur_leaf_nodes(pfvf);

	/* last node just deleted */
	if (moved_qid == 0 || moved_qid == qid)
		return 0;

	moved_qid--;

	node = otx2_sw_node_find_by_qid(pfvf, moved_qid);
	if (!node)
		return 0;

	/* stop traffic to the old queue and disable
	 * SQ associated with it
	 */
	node->qid = OTX2_QOS_QID_INNER;
	__clear_bit(moved_qid, pfvf->qos.qos_sq_bmap);
	otx2_qos_disable_sq(pfvf, moved_qid);

	otx2_reset_qdisc(pfvf->netdev, pfvf->hw.tx_queues + moved_qid);

	/* enable SQ associated with qid and
	 * update the node
	 */
	otx2_cfg_smq(pfvf, node, qid);

	otx2_qos_enable_sq(pfvf, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);
	node->qid = qid;

	*classid = node->classid;
	return 0;
}

static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force,
				  struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	struct otx2_qos_cfg *new_cfg;
	bool dwrr_del_node = false;
	u64 prio;
	int err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_DEL_LAST classid %04x\n", classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}

	/* save qid for use by parent */
	qid = node->qid;
	prio = node->prio;

	parent = otx2_sw_node_find(pfvf, node->parent->classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		return -ENOENT;
	}

	if (!node->is_static)
		dwrr_del_node = true;

	/* destroy the leaf node */
	otx2_qos_disable_sq(pfvf, qid);
	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	/* create downstream txschq entries to parent */
	err = otx2_qos_alloc_txschq_node(pfvf, parent);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration");
		return err;
	}
	WRITE_ONCE(parent->qid, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		return -ENOMEM;
	}
	/* fill txschq cfg and push txschq cfg to hw */
	otx2_qos_fill_cfg_schq(parent, new_cfg);
	err = otx2_qos_push_txschq_cfg(pfvf, parent, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		return err;
	}
	kfree(new_cfg);

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	return 0;
}

void otx2_clean_qos_queues(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	otx2_qos_update_smq(pfvf, root, QOS_SMQ_FLUSH);
}

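/* Replay the saved software QoS tree into hardware: reprogram the
 * root txschq (skipped at TL1), push every level below it and
 * re-enable the send queues.
 */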
void otx2_qos_config_txschq(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;
	int err;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	if (root->level != NIX_TXSCH_LVL_TL1) {
		err = otx2_qos_txschq_config(pfvf, root);
		if (err) {
			netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
			goto root_destroy;
		}
	}

	err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
	if (err) {
		netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
		goto root_destroy;
	}

	otx2_qos_update_smq(pfvf, root, QOS_CFG_SQ);
	return;

root_destroy:
	netdev_err(pfvf->netdev, "Failed to update Scheduler/Shaping config in Hardware\n");
	/* Free resources allocated */
	otx2_qos_root_destroy(pfvf);
}

int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
{
	struct otx2_nic *pfvf = netdev_priv(ndev);
	int res;

	switch (htb->command) {
	case TC_HTB_CREATE:
		return otx2_qos_root_add(pfvf, htb->parent_classid,
					 htb->classid, htb->extack);
	case TC_HTB_DESTROY:
		return otx2_qos_root_destroy(pfvf);
	case TC_HTB_LEAF_ALLOC_QUEUE:
		res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
						htb->parent_classid,
						htb->rate, htb->ceil,
						htb->prio, htb->quantum,
						htb->extack);
		if (res < 0)
			return res;
		htb->qid = res;
		return 0;
	case TC_HTB_LEAF_TO_INNER:
		return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
					      htb->classid, htb->rate,
					      htb->ceil, htb->prio,
					      htb->quantum, htb->extack);
	case TC_HTB_LEAF_DEL:
		return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return otx2_qos_leaf_del_last(pfvf, htb->classid,
				htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
					      htb->extack);
	case TC_HTB_LEAF_QUERY_QUEUE:
		res = otx2_get_txq_by_classid(pfvf, htb->classid);
		htb->qid = res;
		return 0;
	case TC_HTB_NODE_MODIFY:
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}