/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_pool.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
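
/*
 * Illustrative sizing (capability value made up for the example): with
 * log_max_flow_counter_bulk == 16, the initial bulk query length is
 * min(MLX5_INIT_COUNTERS_BULK, 1 << 16) = 8 counters, and it may later be
 * grown to min(MLX5_SW_MAX_COUNTERS_BULK, 1 << 16) = 32768 counters per
 * bulk query command.
 */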

struct mlx5_fc_stats {
	struct xarray counters;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long sampling_interval; /* jiffies */
	u32 *bulk_query_out;
	int bulk_query_len;
	bool bulk_query_alloc_failed;
	unsigned long next_bulk_query_alloc;
	struct mlx5_fs_pool fc_pool;
};

static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);

static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
	return min_t(int, MLX5_INIT_COUNTERS_BULK,
		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
	return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}

static void update_counter_cache(int index, u32 *bulk_raw_data,
				 struct mlx5_fc_cache *cache)
{
	void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
				   flow_statistics[index]);
	u64 packets = MLX5_GET64(traffic_counter, stats, packets);
	u64 bytes = MLX5_GET64(traffic_counter, stats, octets);

	if (cache->packets == packets)
		return;

	cache->packets = packets;
	cache->bytes = bytes;
	cache->lastuse = jiffies;
}

/* Synchronization notes
 *
 * Access to counter array:
 * - create - mlx5_fc_create() (user context)
 *   - inserts the counter into the xarray.
 *
 * - destroy - mlx5_fc_destroy() (user context)
 *   - erases the counter from the xarray and releases it.
 *
 * - query - mlx5_fc_query(), mlx5_fc_query_cached{,_raw}() (user context)
 *   - user should not access a counter after destroy.
 *
 * - bulk query (single thread workqueue context)
 *   - create: query relies on 'lastuse' to avoid updating counters added
 *     around the same time as the current bulk cmd.
 *   - destroy: destroyed counters will not be accessed, even if they are
 *     destroyed during a bulk query command.
 */
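/*
 * Worked example of the bulk windowing below (numbers chosen only for
 * illustration): with bulk_query_len == 8 and a counter id of 13, the
 * window starts at bulk_base_id = 13 & ~0x3 = 12 and spans ids 12..19,
 * and the counter's statistics are read from flow_statistics[13 - 12].
 */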
static void mlx5_fc_stats_query_all_counters(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	u32 bulk_len = fc_stats->bulk_query_len;
	XA_STATE(xas, &fc_stats->counters, 0);
	u32 *data = fc_stats->bulk_query_out;
	struct mlx5_fc *counter;
	u32 last_bulk_id = 0;
	u64 bulk_query_time;
	u32 bulk_base_id;
	int err;

	xas_lock(&xas);
	xas_for_each(&xas, counter, U32_MAX) {
		if (xas_retry(&xas, counter))
			continue;
		if (unlikely(counter->id >= last_bulk_id)) {
			/* Start new bulk query. */
			/* First id must be aligned to 4 when using bulk query. */
			bulk_base_id = counter->id & ~0x3;
			last_bulk_id = bulk_base_id + bulk_len;
			/* The lock is released while querying the hw and reacquired after. */
			xas_unlock(&xas);
			/* The same id needs to be processed again in the next loop iteration. */
			xas_reset(&xas);
			bulk_query_time = jiffies;
			err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, data);
			if (err) {
				mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
				return;
			}
			xas_lock(&xas);
			continue;
		}
		/* Do not update counters added after bulk query was started. */
		if (time_after64(bulk_query_time, counter->cache.lastuse))
			update_counter_cache(counter->id - bulk_base_id, data,
					     &counter->cache);
	}
	xas_unlock(&xas);
}

static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;

	if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
		return;

	if (counter->bulk)
		mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
	else
		mlx5_fc_free(dev, counter);
}

static void mlx5_fc_stats_bulk_query_buf_realloc(struct mlx5_core_dev *dev,
						 int bulk_query_len)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	u32 *bulk_query_out_tmp;
	int out_len;

	out_len = mlx5_cmd_fc_get_bulk_query_out_len(bulk_query_len);
	bulk_query_out_tmp = kvzalloc(out_len, GFP_KERNEL);
	if (!bulk_query_out_tmp) {
		mlx5_core_warn_once(dev,
				    "Can't increase flow counters bulk query buffer size, alloc failed, bulk_query_len(%d)\n",
				    bulk_query_len);
		return;
	}

	kvfree(fc_stats->bulk_query_out);
	fc_stats->bulk_query_out = bulk_query_out_tmp;
	fc_stats->bulk_query_len = bulk_query_len;
	mlx5_core_info(dev,
		       "Flow counters bulk query buffer size increased, bulk_query_len(%d)\n",
		       bulk_query_len);
}

static int mlx5_fc_num_counters(struct mlx5_fc_stats *fc_stats)
{
	struct mlx5_fc *counter;
	int num_counters = 0;
	unsigned long id;

	xa_for_each(&fc_stats->counters, id, counter)
		num_counters++;
	return num_counters;
}

static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_fc_stats *fc_stats = container_of(work, struct mlx5_fc_stats,
						      work.work);
	struct mlx5_core_dev *dev = fc_stats->fc_pool.dev;

	queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval);

	/* Grow the bulk query buffer to max if not maxed and enough counters are present. */
	if (unlikely(fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
		     mlx5_fc_num_counters(fc_stats) > get_init_bulk_query_len(dev)))
		mlx5_fc_stats_bulk_query_buf_realloc(dev, get_max_bulk_query_len(dev));

	mlx5_fc_stats_query_all_counters(dev);
}

static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err) {
		kfree(counter);
		return ERR_PTR(err);
	}

	return counter;
}
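
/*
 * mlx5_fc_acquire() prefers a counter carved out of a shared bulk via the
 * flow counter pool when the counter is aging (periodically queried) and
 * the device supports bulk allocation; otherwise it falls back to a
 * standalone firmware counter allocation.
 */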
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	struct mlx5_fc *counter;

	if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
		counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
		if (!IS_ERR(counter))
			return counter;
	}

	return mlx5_fc_single_alloc(dev);
}

struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	int err;

	if (IS_ERR(counter))
		return counter;

	counter->aging = aging;

	if (aging) {
		u32 id = counter->id;

		counter->cache.lastuse = jiffies;
		counter->lastbytes = counter->cache.bytes;
		counter->lastpackets = counter->cache.packets;

		err = xa_err(xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
		if (err != 0)
			goto err_out_alloc;
	}

	return counter;

err_out_alloc:
	mlx5_fc_release(dev, counter);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);
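
/*
 * Illustrative usage sketch (not taken from an actual caller): an aging
 * counter is created once, referenced from a flow rule by mlx5_fc_id(),
 * polled with mlx5_fc_query_cached() for the deltas accumulated since the
 * previous read, and finally released with mlx5_fc_destroy():
 *
 *	struct mlx5_fc *fc = mlx5_fc_create(dev, true);
 *	u64 bytes, packets, lastuse;
 *
 *	if (IS_ERR(fc))
 *		return PTR_ERR(fc);
 *	... attach mlx5_fc_id(fc) to a flow rule as its counter id ...
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
 *	mlx5_fc_destroy(dev, fc);
 */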

u32 mlx5_fc_id(struct mlx5_fc *counter)
{
	return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);

void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging)
		xa_erase(&fc_stats->counters, counter->id);
	mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);

int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats;

	fc_stats = kzalloc(sizeof(*fc_stats), GFP_KERNEL);
	if (!fc_stats)
		return -ENOMEM;
	dev->priv.fc_stats = fc_stats;

	xa_init(&fc_stats->counters);

	/* Allocate initial (small) bulk query buffer. */
	mlx5_fc_stats_bulk_query_buf_realloc(dev, get_init_bulk_query_len(dev));
	if (!fc_stats->bulk_query_out)
		goto err_bulk;

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		goto err_wq_create;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
	queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
	return 0;

err_wq_create:
	kvfree(fc_stats->bulk_query_out);
err_bulk:
	kfree(fc_stats);
	return -ENOMEM;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
	struct mlx5_fc *counter;
	unsigned long id;

	cancel_delayed_work_sync(&fc_stats->work);
	destroy_workqueue(fc_stats->wq);
	fc_stats->wq = NULL;

	xa_for_each(&fc_stats->counters, id, counter) {
		xa_erase(&fc_stats->counters, id);
		mlx5_fc_release(dev, counter);
	}
	xa_destroy(&fc_stats->counters);

	mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
	kvfree(fc_stats->bulk_query_out);
	kfree(fc_stats);
}

int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
		  u64 *packets, u64 *bytes)
{
	return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);

u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
{
	return counter->cache.lastuse;
}
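
/*
 * mlx5_fc_query_cached() reads the cache maintained by the periodic bulk
 * query work and returns the bytes/packets deltas accumulated since the
 * previous call, updating the stored baseline as a side effect.
 * mlx5_fc_query_cached_raw() returns the absolute cached values instead.
 */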
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}

void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
			      u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c = counter->cache;

	*bytes = c.bytes;
	*packets = c.packets;
	*lastuse = c.lastuse;
}

void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;

	queue_delayed_work(fc_stats->wq, dwork, delay);
}
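
/*
 * The sampling interval can only be shortened here: the requested value is
 * clamped against the interval currently in use, so the fastest requester
 * wins.
 */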
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
				      unsigned long interval)
{
	struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;

	fc_stats->sampling_interval = min_t(unsigned long, interval,
					    fc_stats->sampling_interval);
}

/* Flow counter bulks */

static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
			 u32 id)
{
	counter->bulk = bulk;
	counter->id = id;
}

u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
	return counter->bulk->base_id;
}

static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
						void *pool_ctx)
{
	enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
	struct mlx5_fc_bulk *fc_bulk;
	int bulk_len;
	u32 base_id;
	int i;

	alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
	bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;

	fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
	if (!fc_bulk)
		return NULL;

	if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
		goto fc_bulk_free;

	if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
		goto fs_bulk_cleanup;
	fc_bulk->base_id = base_id;
	for (i = 0; i < bulk_len; i++)
		mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);

	refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
	mutex_init(&fc_bulk->hws_data.lock);
	return &fc_bulk->fs_bulk;

fs_bulk_cleanup:
	mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
fc_bulk_free:
	kvfree(fc_bulk);
	return NULL;
}

static int
mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
	struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
						    struct mlx5_fc_bulk,
						    fs_bulk);

	if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
		mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
		return -EBUSY;
	}

	mlx5_cmd_fc_free(dev, fc_bulk->base_id);
	mlx5_fs_bulk_cleanup(fs_bulk);
	kvfree(fc_bulk);

	return 0;
}
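
/*
 * The pool threshold below bounds roughly how many unused counters the pool
 * keeps around for reuse: about a tenth of the counters currently in use,
 * capped at 2^18. For example (made-up numbers), with 5000 counters in use
 * up to about 500 free counters are retained.
 */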
static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
{
	fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
				   fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
}

/* Flow counters pool API */

static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
	.bulk_destroy = mlx5_fc_bulk_destroy,
	.bulk_create = mlx5_fc_bulk_create,
	.update_threshold = mlx5_fc_pool_update_threshold,
};

static void
mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
{
	mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops, NULL);
}

static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
{
	mlx5_fs_pool_cleanup(fc_pool);
}

static struct mlx5_fc *
mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
{
	struct mlx5_fs_pool_index pool_index = {};
	struct mlx5_fc_bulk *fc_bulk;
	int err;

	err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
	if (err)
		return ERR_PTR(err);
	fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
	return &fc_bulk->fcs[pool_index.index];
}

static void
mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
{
	struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
	struct mlx5_fs_pool_index pool_index = {};
	struct mlx5_core_dev *dev = fc_pool->dev;

	pool_index.fs_bulk = fs_bulk;
	pool_index.index = fc->id - fc->bulk->base_id;
	if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
		mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}

/**
 * mlx5_fc_local_create - Allocate an mlx5_fc struct wrapping a counter that
 * was already acquired, given its counter id and bulk data.
 *
 * @counter_id: id of the already acquired flow counter
 * @offset: counter offset from bulk base
 * @bulk_size: counter's bulk size as was allocated
 *
 * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
 */
struct mlx5_fc *
mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{
	struct mlx5_fc_bulk *fc_bulk;
	struct mlx5_fc *counter;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);
	fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
	if (!fc_bulk) {
		kfree(counter);
		return ERR_PTR(-ENOMEM);
	}

	counter->type = MLX5_FC_TYPE_LOCAL;
	counter->id = counter_id;
	fc_bulk->base_id = counter_id - offset;
	fc_bulk->fs_bulk.bulk_len = bulk_size;
	counter->bulk = fc_bulk;
	return counter;
}
EXPORT_SYMBOL(mlx5_fc_local_create);
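
/*
 * mlx5_fc_local_destroy() only frees the software wrappers allocated by
 * mlx5_fc_local_create(); it issues no firmware command, so the underlying
 * counter id remains owned by whoever originally allocated it.
 */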
void mlx5_fc_local_destroy(struct mlx5_fc *counter)
{
	if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
		return;

	kfree(counter->bulk);
	kfree(counter);
}
EXPORT_SYMBOL(mlx5_fc_local_destroy);