// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google, Inc.
 */

#include "gve.h"
#include "gve_utils.h"

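/* Number of references on the buffer's page still held outside the driver
 * (i.e. by in-flight SKBs), measured against the bias the driver keeps on
 * the page.
 */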
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
{
	return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
}

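/* Pop a buffer state off the ring's free list. The free list is threaded
 * through the ->next indices, with -1 marking an empty list.
 */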
struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = rx->dqo.free_buf_states;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from free list */
	rx->dqo.free_buf_states = buf_state->next;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

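/* An allocated buffer state points at itself (see gve_alloc_buf_state()). */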
bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	return buf_state->next == buffer_id;
}

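/* Push a buffer state back onto the head of the free list. */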
void gve_free_buf_state(struct gve_rx_ring *rx,
			struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = rx->dqo.free_buf_states;
	rx->dqo.free_buf_states = buffer_id;
}

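/* Remove and return the buffer state at the head of @list, or NULL if the
 * list is empty. Lists are singly linked through buffer IDs, terminated
 * with -1.
 */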
struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
						   struct gve_index_list *list)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = list->head;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from list */
	list->head = buf_state->next;
	if (buf_state->next == -1)
		list->tail = -1;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

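/* Append @buf_state to the tail of @list. */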
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
			   struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = -1;

	if (list->head == -1) {
		list->head = buffer_id;
		list->tail = buffer_id;
	} else {
		int tail = list->tail;

		rx->dqo.buf_states[tail].next = buffer_id;
		list->tail = buffer_id;
	}
}

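/* Prefer a buffer state from the recycled list; otherwise scan a few entries
 * of the used list for one whose page is no longer referenced by the stack.
 */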
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	int i;

	/* Recycled buf states are immediately usable. */
	buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
	if (likely(buf_state))
		return buf_state;

	if (unlikely(rx->dqo.used_buf_states.head == -1))
		return NULL;

	/* Used buf states are only usable when ref count reaches 0, which means
	 * no SKBs refer to them.
	 *
	 * Search a limited number before giving up.
	 */
	for (i = 0; i < 5; i++) {
		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
		if (gve_buf_ref_cnt(buf_state) == 0) {
			rx->dqo.used_buf_states_cnt--;
			return buf_state;
		}

		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	}

	return NULL;
}

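/* Bind the next unused page of the RX queue's QPL (queue page list) to
 * @buf_state. The page's refcount is raised to INT_MAX and mirrored in
 * pagecnt_bias so outstanding stack references can be derived from the
 * difference (see gve_buf_ref_cnt()).
 */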
int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state)
{
	struct gve_priv *priv = rx->gve;
	u32 idx;

	idx = rx->dqo.next_qpl_page_idx;
	if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
		net_err_ratelimited("%s: Out of QPL pages\n",
				    priv->dev->name);
		return -ENOMEM;
	}
	buf_state->page_info.page = rx->dqo.qpl->pages[idx];
	buf_state->addr = rx->dqo.qpl->page_buses[idx];
	rx->dqo.next_qpl_page_idx++;
	buf_state->page_info.page_offset = 0;
	buf_state->page_info.page_address =
		page_address(buf_state->page_info.page);
	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
	buf_state->last_single_ref_offset = 0;

	/* The page already has 1 ref. */
	page_ref_add(buf_state->page_info.page, INT_MAX - 1);
	buf_state->page_info.pagecnt_bias = INT_MAX;

	return 0;
}

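/* Give back the page references that back pagecnt_bias and dissociate the
 * page from @buf_state.
 */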
void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
{
	if (!buf_state->page_info.page)
		return;

	page_ref_sub(buf_state->page_info.page,
		     buf_state->page_info.pagecnt_bias - 1);
	buf_state->page_info.page = NULL;
}

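/* Advance @buf_state to the next buffer-sized chunk of its page and recycle
 * it, unless the driver can no longer tell whether that chunk has been
 * returned, in which case the buffer is parked on the used list.
 */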
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct gve_rx_buf_state_dqo *buf_state)
{
	const u16 data_buffer_size = priv->data_buffer_size_dqo;
	int pagecount;

	/* Can't reuse if we only fit one buffer per page */
	if (data_buffer_size * 2 > PAGE_SIZE)
		goto mark_used;

	pagecount = gve_buf_ref_cnt(buf_state);

	/* Record the offset when we have a single remaining reference.
	 *
	 * When this happens, we know all of the other offsets of the page are
	 * usable.
	 */
	if (pagecount == 1) {
		buf_state->last_single_ref_offset =
			buf_state->page_info.page_offset;
	}

	/* Use the next buffer sized chunk in the page. */
	buf_state->page_info.page_offset += data_buffer_size;
	buf_state->page_info.page_offset &= (PAGE_SIZE - 1);

	/* If we wrap around to the same offset without ever dropping to 1
	 * reference, then we don't know if this offset was ever freed.
	 */
	if (buf_state->page_info.page_offset ==
	    buf_state->last_single_ref_offset) {
		goto mark_used;
	}

	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
	return;

mark_used:
	gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	rx->dqo.used_buf_states_cnt++;
}

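/* Return the buffer's page, if any, to its page pool. */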
void gve_free_to_page_pool(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state,
			   bool allow_direct)
{
	struct page *page = buf_state->page_info.page;

	if (!page)
		return;

	page_pool_put_full_page(page->pp, page, allow_direct);
	buf_state->page_info.page = NULL;
}

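/* Fill @buf_state with a fragment from the RX queue's page pool. */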
static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
				    struct gve_rx_buf_state_dqo *buf_state)
{
	struct gve_priv *priv = rx->gve;
	struct page *page;

	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
	page = page_pool_alloc(rx->dqo.page_pool,
			       &buf_state->page_info.page_offset,
			       &buf_state->page_info.buf_size, GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	buf_state->page_info.page = page;
	buf_state->page_info.page_address = page_address(page);
	buf_state->addr = page_pool_get_dma_addr(page);

	return 0;
}

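/* Create the RX queue's page pool. The pool DMA-maps its pages and syncs
 * them for the device, and is sized as a multiple of the ring size.
 */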
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
					  struct gve_rx_ring *rx)
{
	u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
	struct page_pool_params pp = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order = 0,
		.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
		.dev = &priv->pdev->dev,
		.netdev = priv->dev,
		.napi = &priv->ntfy_blocks[ntfy_id].napi,
		.max_len = PAGE_SIZE,
		.dma_dir = DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp);
}

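/* Return a buffer to the driver: in page pool mode the page goes back to
 * the pool and the buf_state is freed; in QPL mode the buf_state (and its
 * page) is placed on the recycled list.
 */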
void gve_free_buffer(struct gve_rx_ring *rx,
		     struct gve_rx_buf_state_dqo *buf_state)
{
	if (rx->dqo.page_pool) {
		gve_free_to_page_pool(rx, buf_state, true);
		gve_free_buf_state(rx, buf_state);
	} else {
		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
				      buf_state);
	}
}

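/* The buffer has been consumed (e.g. attached to an SKB): in page pool mode
 * the page now travels with its consumer, so only the buf_state is freed;
 * in QPL mode the bias is dropped and the buffer is considered for
 * recycling.
 */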
void gve_reuse_buffer(struct gve_rx_ring *rx,
		      struct gve_rx_buf_state_dqo *buf_state)
{
	if (rx->dqo.page_pool) {
		buf_state->page_info.page = NULL;
		gve_free_buf_state(rx, buf_state);
	} else {
		gve_dec_pagecnt_bias(&buf_state->page_info);
		gve_try_recycle_buf(rx->gve, rx, buf_state);
	}
}

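/* Fill @desc with a buffer: page pool mode always draws a fresh fragment,
 * while QPL mode prefers a recycled buffer and falls back to binding a new
 * QPL page.
 */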
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
{
	struct gve_rx_buf_state_dqo *buf_state;

	if (rx->dqo.page_pool) {
		buf_state = gve_alloc_buf_state(rx);
		if (WARN_ON_ONCE(!buf_state))
			return -ENOMEM;

		if (gve_alloc_from_page_pool(rx, buf_state))
			goto free_buf_state;
	} else {
		buf_state = gve_get_recycled_buf_state(rx);
		if (unlikely(!buf_state)) {
			buf_state = gve_alloc_buf_state(rx);
			if (unlikely(!buf_state))
				return -ENOMEM;

			if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state)))
				goto free_buf_state;
		}
	}
	desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
	desc->buf_addr = cpu_to_le64(buf_state->addr +
				     buf_state->page_info.page_offset);

	return 0;

free_buf_state:
	gve_free_buf_state(rx, buf_state);
	return -ENOMEM;
}