// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <net/netdev_queues.h>

#include "octep_vf_config.h"
#include "octep_vf_main.h"
15
16 /* Reset various index of Tx queue data structure. */
octep_vf_iq_reset_indices(struct octep_vf_iq * iq)17 static void octep_vf_iq_reset_indices(struct octep_vf_iq *iq)
18 {
19 iq->fill_cnt = 0;
20 iq->host_write_index = 0;
21 iq->octep_vf_read_index = 0;
22 iq->flush_index = 0;
23 iq->pkts_processed = 0;
24 iq->pkt_in_done = 0;
25 }
26
27 /**
28 * octep_vf_iq_process_completions() - Process Tx queue completions.
29 *
30 * @iq: Octeon Tx queue data structure.
31 * @budget: max number of completions to be processed in one invocation.
32 */
octep_vf_iq_process_completions(struct octep_vf_iq * iq,u16 budget)33 int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
34 {
35 u32 compl_pkts, compl_bytes, compl_sg;
36 struct octep_vf_device *oct = iq->octep_vf_dev;
37 struct octep_vf_tx_buffer *tx_buffer;
38 struct skb_shared_info *shinfo;
39 u32 fi = iq->flush_index;
40 struct sk_buff *skb;
41 u8 frags, i;
42
43 compl_pkts = 0;
44 compl_sg = 0;
45 compl_bytes = 0;
46 iq->octep_vf_read_index = oct->hw_ops.update_iq_read_idx(iq);
47
48 while (likely(budget && (fi != iq->octep_vf_read_index))) {
49 tx_buffer = iq->buff_info + fi;
50 skb = tx_buffer->skb;
51
52 fi++;
53 if (unlikely(fi == iq->max_count))
54 fi = 0;
55 compl_bytes += skb->len;
56 compl_pkts++;
57 budget--;
58
59 if (!tx_buffer->gather) {
60 dma_unmap_single(iq->dev, tx_buffer->dma,
61 tx_buffer->skb->len, DMA_TO_DEVICE);
62 dev_kfree_skb_any(skb);
63 continue;
64 }
65
66 /* Scatter/Gather */
67 shinfo = skb_shinfo(skb);
68 frags = shinfo->nr_frags;
69 compl_sg++;
70
71 dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
72 tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
73
74 i = 1; /* entry 0 is main skb, unmapped above */
75 while (frags--) {
76 dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
77 tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
78 i++;
79 }
80
81 dev_kfree_skb_any(skb);
82 }
83
84 iq->pkts_processed += compl_pkts;
85 iq->stats->instr_completed += compl_pkts;
86 iq->stats->bytes_sent += compl_bytes;
87 iq->stats->sgentry_sent += compl_sg;
88 iq->flush_index = fi;
89
90 netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
91 compl_bytes, IQ_INSTR_SPACE(iq),
92 OCTEP_VF_WAKE_QUEUE_THRESHOLD);
93
94 return !budget;
95 }
96
97 /**
98 * octep_vf_iq_free_pending() - Free Tx buffers for pending completions.
99 *
100 * @iq: Octeon Tx queue data structure.
101 */
octep_vf_iq_free_pending(struct octep_vf_iq * iq)102 static void octep_vf_iq_free_pending(struct octep_vf_iq *iq)
103 {
104 struct octep_vf_tx_buffer *tx_buffer;
105 struct skb_shared_info *shinfo;
106 u32 fi = iq->flush_index;
107 struct sk_buff *skb;
108 u8 frags, i;
109
110 while (fi != iq->host_write_index) {
111 tx_buffer = iq->buff_info + fi;
112 skb = tx_buffer->skb;
113
114 fi++;
115 if (unlikely(fi == iq->max_count))
116 fi = 0;
117
118 if (!tx_buffer->gather) {
119 dma_unmap_single(iq->dev, tx_buffer->dma,
120 tx_buffer->skb->len, DMA_TO_DEVICE);
121 dev_kfree_skb_any(skb);
122 continue;
123 }
124
125 /* Scatter/Gather */
126 shinfo = skb_shinfo(skb);
127 frags = shinfo->nr_frags;
128
129 dma_unmap_single(iq->dev,
130 tx_buffer->sglist[0].dma_ptr[0],
131 tx_buffer->sglist[0].len[0],
132 DMA_TO_DEVICE);
133
134 i = 1; /* entry 0 is main skb, unmapped above */
135 while (frags--) {
136 dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
137 tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
138 i++;
139 }
140
141 dev_kfree_skb_any(skb);
142 }
143
144 iq->flush_index = fi;
145 netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
146 }
147
148 /**
149 * octep_vf_clean_iqs() - Clean Tx queues to shutdown the device.
150 *
151 * @oct: Octeon device private data structure.
152 *
153 * Free the buffers in Tx queue descriptors pending completion and
154 * reset queue indices
155 */
octep_vf_clean_iqs(struct octep_vf_device * oct)156 void octep_vf_clean_iqs(struct octep_vf_device *oct)
157 {
158 int i;
159
160 for (i = 0; i < oct->num_iqs; i++) {
161 octep_vf_iq_free_pending(oct->iq[i]);
162 octep_vf_iq_reset_indices(oct->iq[i]);
163 }
164 }
165
166 /**
167 * octep_vf_setup_iq() - Setup a Tx queue.
168 *
169 * @oct: Octeon device private data structure.
170 * @q_no: Tx queue number to be setup.
171 *
172 * Allocate resources for a Tx queue.
173 */
octep_vf_setup_iq(struct octep_vf_device * oct,int q_no)174 static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
175 {
176 u32 desc_ring_size, buff_info_size, sglist_size;
177 struct octep_vf_iq *iq;
178 int i;
179
180 iq = vzalloc(sizeof(*iq));
181 if (!iq)
182 goto iq_alloc_err;
183 oct->iq[q_no] = iq;
184
185 iq->octep_vf_dev = oct;
186 iq->netdev = oct->netdev;
187 iq->dev = &oct->pdev->dev;
188 iq->q_no = q_no;
189 iq->stats = &oct->stats_iq[q_no];
190 iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
191 iq->ring_size_mask = iq->max_count - 1;
192 iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
193 iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
194
195 /* Allocate memory for hardware queue descriptors */
196 desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
197 iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
198 &iq->desc_ring_dma, GFP_KERNEL);
199 if (unlikely(!iq->desc_ring)) {
200 dev_err(iq->dev,
201 "Failed to allocate DMA memory for IQ-%d\n", q_no);
202 goto desc_dma_alloc_err;
203 }
204
205 /* Allocate memory for hardware SGLIST descriptors */
206 sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
207 CFG_GET_IQ_NUM_DESC(oct->conf);
208 iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
209 &iq->sglist_dma, GFP_KERNEL);
210 if (unlikely(!iq->sglist)) {
211 dev_err(iq->dev,
212 "Failed to allocate DMA memory for IQ-%d SGLIST\n",
213 q_no);
214 goto sglist_alloc_err;
215 }
216
217 /* allocate memory to manage Tx packets pending completion */
218 buff_info_size = OCTEP_VF_IQ_TXBUFF_INFO_SIZE * iq->max_count;
219 iq->buff_info = vzalloc(buff_info_size);
220 if (!iq->buff_info) {
221 dev_err(iq->dev,
222 "Failed to allocate buff info for IQ-%d\n", q_no);
223 goto buff_info_err;
224 }
225
226 /* Setup sglist addresses in tx_buffer entries */
227 for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
228 struct octep_vf_tx_buffer *tx_buffer;
229
230 tx_buffer = &iq->buff_info[i];
231 tx_buffer->sglist =
232 &iq->sglist[i * OCTEP_VF_SGLIST_ENTRIES_PER_PKT];
233 tx_buffer->sglist_dma =
234 iq->sglist_dma + (i * OCTEP_VF_SGLIST_SIZE_PER_PKT);
235 }
236
237 octep_vf_iq_reset_indices(iq);
238 oct->hw_ops.setup_iq_regs(oct, q_no);
239
240 oct->num_iqs++;
241 return 0;
242
243 buff_info_err:
244 dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
245 sglist_alloc_err:
246 dma_free_coherent(iq->dev, desc_ring_size,
247 iq->desc_ring, iq->desc_ring_dma);
248 desc_dma_alloc_err:
249 vfree(iq);
250 oct->iq[q_no] = NULL;
251 iq_alloc_err:
252 return -1;
253 }
254
255 /**
256 * octep_vf_free_iq() - Free Tx queue resources.
257 *
258 * @iq: Octeon Tx queue data structure.
259 *
260 * Free all the resources allocated for a Tx queue.
261 */
octep_vf_free_iq(struct octep_vf_iq * iq)262 static void octep_vf_free_iq(struct octep_vf_iq *iq)
263 {
264 struct octep_vf_device *oct = iq->octep_vf_dev;
265 u64 desc_ring_size, sglist_size;
266 int q_no = iq->q_no;
267
268 desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
269
270 vfree(iq->buff_info);
271
272 if (iq->desc_ring)
273 dma_free_coherent(iq->dev, desc_ring_size,
274 iq->desc_ring, iq->desc_ring_dma);
275
276 sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
277 CFG_GET_IQ_NUM_DESC(oct->conf);
278 if (iq->sglist)
279 dma_free_coherent(iq->dev, sglist_size,
280 iq->sglist, iq->sglist_dma);
281
282 vfree(iq);
283 oct->iq[q_no] = NULL;
284 oct->num_iqs--;
285 }
286
287 /**
288 * octep_vf_setup_iqs() - setup resources for all Tx queues.
289 *
290 * @oct: Octeon device private data structure.
291 */
octep_vf_setup_iqs(struct octep_vf_device * oct)292 int octep_vf_setup_iqs(struct octep_vf_device *oct)
293 {
294 int i;
295
296 oct->num_iqs = 0;
297 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
298 if (octep_vf_setup_iq(oct, i)) {
299 dev_err(&oct->pdev->dev,
300 "Failed to setup IQ(TxQ)-%d.\n", i);
301 goto iq_setup_err;
302 }
303 dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
304 }
305
306 return 0;
307
308 iq_setup_err:
309 while (i) {
310 i--;
311 octep_vf_free_iq(oct->iq[i]);
312 }
313 return -1;
314 }
315
316 /**
317 * octep_vf_free_iqs() - Free resources of all Tx queues.
318 *
319 * @oct: Octeon device private data structure.
320 */
octep_vf_free_iqs(struct octep_vf_device * oct)321 void octep_vf_free_iqs(struct octep_vf_device *oct)
322 {
323 int i;
324
325 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
326 octep_vf_free_iq(oct->iq[i]);
327 dev_dbg(&oct->pdev->dev,
328 "Successfully destroyed IQ(TxQ)-%d.\n", i);
329 }
330 oct->num_iqs = 0;
331 }
332