// SPDX-License-Identifier: GPL-2.0+
/* Microchip lan969x Switch driver
 *
 * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries.
 */
#include <net/page_pool/helpers.h>

#include "../sparx5_main.h"
#include "../sparx5_main_regs.h"
#include "../sparx5_port.h"

#include "fdma_api.h"
#include "lan969x.h"

#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))

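/* FDMA callback: return the DMA address of the TX buffer that was mapped
 * for this DCB when the frame was queued in lan969x_fdma_xmit().
 */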
static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	*dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;

	return 0;
}

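/* FDMA callback: back the RX data block with a fresh page from the page
 * pool and return its DMA address.
 */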
static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				      u64 *dataptr)
{
	struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	rx->page[dcb][db] = page;

	*dataptr = page_pool_get_dma_addr(page);

	return 0;
}

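/* Find a TX DCB that is free and not the current tail of the DCB chain.
 * Returns its index, or -ENOSPC if none is available.
 */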
static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
{
	struct fdma *fdma = &tx->fdma;

	for (int i = 0; i < fdma->n_dcbs; ++i)
		if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
			return i;

	return -ENOSPC;
}

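/* Reclaim TX buffers whose DCBs the hardware has completed: update the
 * netdev statistics, unmap the DMA buffer and release the skb, unless it
 * is held back for a two-step PTP timestamp.
 */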
static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
{
	struct fdma *fdma = &sparx5->tx.fdma;
	struct sparx5_tx_buf *db;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&sparx5->tx_lock, flags);

	for (i = 0; i < fdma->n_dcbs; ++i) {
		db = &sparx5->tx.dbs[i];

		if (!db->used)
			continue;

		if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
			continue;

		db->dev->stats.tx_bytes += db->skb->len;
		db->dev->stats.tx_packets++;
		sparx5->tx.packets++;

		dma_unmap_single(sparx5->dev,
				 db->dma_addr,
				 db->skb->len,
				 DMA_TO_DEVICE);

		if (!db->ptp)
			napi_consume_skb(db->skb, weight);

		db->used = false;
	}

	spin_unlock_irqrestore(&sparx5->tx_lock, flags);
}

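/* Return all RX pages to the page pool (teardown path). */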
static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	for (int i = 0; i < fdma->n_dcbs; ++i) {
		for (int j = 0; j < fdma->n_dbs; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

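/* Build an skb around the current RX data block: parse the IFH to find
 * the source port, strip the IFH (and the FCS unless NETIF_F_RXFCS is
 * set), attach the RX timestamp and set the offload forwarding mark for
 * bridged ports.
 */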
static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
						 struct sparx5_rx *rx)
{
	const struct sparx5_consts *consts = sparx5->data->consts;
	struct fdma *fdma = &rx->fdma;
	struct sparx5_port *port;
	struct frame_info fi;
	struct sk_buff *skb;
	struct fdma_db *db;
	struct page *page;

	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
	page = rx->page[fdma->dcb_index][fdma->db_index];

	sparx5_ifh_parse(sparx5, page_address(page), &fi);
	port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
					       NULL;
	if (WARN_ON(!port))
		goto free_page;

	skb = build_skb(page_address(page), fdma->db_size);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);
	skb_put(skb, fdma_db_len_get(db));
	skb_pull(skb, IFH_LEN * sizeof(u32));

	skb->dev = port->ndev;

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

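/* Allocate the RX page pool and the coherent DCB memory, and initialize
 * the RX DCB chain.
 */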
static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct fdma *fdma = &rx->fdma;
	int err;

	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = fdma->n_dcbs * fdma->n_dbs,
		.nid = NUMA_NO_NODE,
		.dev = sparx5->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = 0,
		.max_len = fdma->db_size -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool))
		return PTR_ERR(rx->page_pool);

	err = fdma_alloc_coherent(sparx5->dev, fdma);
	if (err) {
		page_pool_destroy(rx->page_pool);
		return err;
	}

	fdma_dcbs_init(fdma,
		       FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}

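/* Allocate the TX buffer bookkeeping array and the coherent DCB memory,
 * and initialize the TX DCB chain with done status so the DCBs are
 * available for injection.
 */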
static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	int err;

	tx->dbs = kcalloc(fdma->n_dcbs,
			  sizeof(struct sparx5_tx_buf),
			  GFP_KERNEL);
	if (!tx->dbs)
		return -ENOMEM;

	err = fdma_alloc_coherent(sparx5->dev, fdma);
	if (err) {
		kfree(tx->dbs);
		return err;
	}

	fdma_dcbs_init(fdma,
		       FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_DONE);

	return 0;
}

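/* Set up the RX (extraction) channel parameters and callbacks. */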
static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
{
	struct fdma *fdma = &sparx5->rx.fdma;

	fdma->channel_id = FDMA_XTR_CHANNEL;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = 1;
	fdma->priv = sparx5;
	fdma->size = fdma_get_size(fdma);
	fdma->db_size = PAGE_SIZE;
	fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;

	/* Fetch a netdev for SKB and NAPI use, any will do */
	for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
		struct sparx5_port *port = sparx5->ports[idx];

		if (port && port->ndev) {
			sparx5->rx.ndev = port->ndev;
			break;
		}
	}
}

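/* Set up the TX (injection) channel parameters and callbacks. */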
static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
{
	struct fdma *fdma = &sparx5->tx.fdma;

	fdma->channel_id = FDMA_INJ_CHANNEL;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = 1;
	fdma->priv = sparx5;
	fdma->size = fdma_get_size(fdma);
	fdma->db_size = PAGE_SIZE;
	fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}

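/* NAPI poll: reclaim completed TX buffers, pass received frames to the
 * stack, refill the consumed RX DCBs with fresh pages and re-enable the
 * FDMA interrupts when the budget was not exhausted.
 */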
int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	int old_dcb, dcb_reload, counter = 0;
	struct fdma *fdma = &rx->fdma;
	struct sk_buff *skb;

	dcb_reload = fdma->dcb_index;

	lan969x_fdma_tx_clear_buf(sparx5, weight);

	/* Process RX data */
	while (counter < weight) {
		if (!fdma_has_frames(fdma))
			break;

		skb = lan969x_fdma_rx_get_frame(sparx5, rx);
		if (!skb)
			break;

		napi_gro_receive(&rx->napi, skb);

		fdma_db_advance(fdma);
		counter++;
		/* Check if the DCB can be reused */
		if (fdma_dcb_is_reusable(fdma))
			continue;

		fdma_db_reset(fdma);
		fdma_dcb_advance(fdma);
	}

	/* Allocate new pages and map them */
	while (dcb_reload != fdma->dcb_index) {
		old_dcb = dcb_reload;
		dcb_reload++;
		/* n_dcbs must be a power of 2 */
		dcb_reload &= fdma->n_dcbs - 1;

		fdma_dcb_add(fdma,
			     old_dcb,
			     FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);

		sparx5_fdma_reload(sparx5, fdma);
	}

	if (counter < weight && napi_complete_done(napi, counter))
		spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA);

	return counter;
}

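/* Queue a frame for injection: prepend the IFH, make room for the FCS,
 * map the buffer for DMA and hand the next free DCB to the FDMA.
 */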
int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
		      struct net_device *dev)
{
	int next_dcb, needed_headroom, needed_tailroom, err;
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	struct sparx5_tx_buf *db_buf;
	u64 status;

	next_dcb = lan969x_fdma_get_next_dcb(tx);
	if (next_dcb < 0)
		return -EBUSY;

	needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err))
			return err;
	}

	skb_push(skb, IFH_LEN * 4);
	memcpy(skb->data, ifh, IFH_LEN * 4);
	skb_put(skb, ETH_FCS_LEN);

	db_buf = &tx->dbs[next_dcb];
	db_buf->dma_addr = dma_map_single(sparx5->dev,
					  skb->data,
					  skb->len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
		return -ENOMEM;

	db_buf->dev = dev;
	db_buf->skb = skb;
	db_buf->ptp = false;
	db_buf->used = true;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		db_buf->ptp = true;

	status = FDMA_DCB_STATUS_SOF |
		 FDMA_DCB_STATUS_EOF |
		 FDMA_DCB_STATUS_BLOCKO(0) |
		 FDMA_DCB_STATUS_BLOCKL(skb->len) |
		 FDMA_DCB_STATUS_INTR;

	fdma_dcb_advance(fdma);
	fdma_dcb_add(fdma, next_dcb, 0, status);

	sparx5_fdma_reload(sparx5, fdma);

	return NETDEV_TX_OK;
}

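/* One-time FDMA setup: configure the RX and TX channels, set the DMA
 * mask, allocate the buffers and take the FDMA block through a reset.
 */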
int lan969x_fdma_init(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	int err;

	lan969x_fdma_rx_init(sparx5);
	lan969x_fdma_tx_init(sparx5);
	sparx5_fdma_injection_mode(sparx5);

	err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask\n");
		return err;
	}

	err = lan969x_fdma_rx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
			err);
		return err;
	}

	err = lan969x_fdma_tx_alloc(sparx5);
	if (err) {
		fdma_free_coherent(sparx5->dev, &rx->fdma);
		dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
			err);
		return err;
	}

	/* Reset FDMA state */
	spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
	spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);

	return err;
}

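/* Tear down the FDMA: stop the channels and release the DCB memory, the
 * TX buffer array, the RX pages and the page pool.
 */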
int lan969x_fdma_deinit(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct sparx5_tx *tx = &sparx5->tx;

	sparx5_fdma_stop(sparx5);
	fdma_free_coherent(sparx5->dev, &tx->fdma);
	kfree(tx->dbs);
	fdma_free_coherent(sparx5->dev, &rx->fdma);
	lan969x_fdma_free_pages(rx);
	page_pool_destroy(rx->page_pool);

	return 0;
}