// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: structs containing info for each queue to be initialized
 */
static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
				 struct idpf_ctlq_create_info *q_create_info)
{
	/* set control queue registers in our local struct */
	cq->reg.head = q_create_info->reg.head;
	cq->reg.tail = q_create_info->reg.tail;
	cq->reg.len = q_create_info->reg.len;
	cq->reg.bah = q_create_info->reg.bah;
	cq->reg.bal = q_create_info->reg.bal;
	cq->reg.len_mask = q_create_info->reg.len_mask;
	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
	cq->reg.head_mask = q_create_info->reg.head_mask;
}
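
/*
 * Usage note (illustrative): the offsets cached above are what later
 * wr32() calls in this file use to reach this queue's hardware
 * registers, e.g. bumping the TX tail after posting descriptors:
 *
 *	wr32(hw, cq->reg.tail, cq->next_to_use);
 */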

/**
 * idpf_ctlq_init_regs - Initialize control queue registers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized the
 * descriptor ring memory and buffer memory.
 */
static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
				bool is_rxq)
{
	/* Update tail to post pre-allocated buffers for rx queues */
	if (is_rxq)
		wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));

	/* For non-Mailbox control queues only TAIL needs to be set */
	if (cq->q_id != -1)
		return;

	/* Clear Head for both send or receive */
	wr32(hw, cq->reg.head, 0);

	/* set starting point */
	wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
	wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}
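
/*
 * Sketch: for the default mailbox (q_id == -1), the ring base is split
 * across BAL/BAH and the length register doubles as the queue enable, so
 * a hypothetical 64-entry ring at DMA address pa is programmed roughly as:
 *
 *	wr32(hw, cq->reg.bal, lower_32_bits(pa));
 *	wr32(hw, cq->reg.bah, upper_32_bits(pa));
 *	wr32(hw, cq->reg.len, 64 | cq->reg.len_ena_mask);
 */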

/**
 * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
	int i;

	for (i = 0; i < cq->ring_size; i++) {
		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];

		/* No buffer to post to descriptor, continue */
		if (!bi)
			continue;

		desc->flags =
			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
		desc->opcode = 0;
		desc->datalen = cpu_to_le16(bi->size);
		desc->ret_val = 0;
		desc->v_opcode_dtype = 0;
		desc->v_retval = 0;
		desc->params.indirect.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.indirect.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.indirect.param0 = 0;
		desc->params.indirect.sw_cookie = 0;
		desc->params.indirect.v_flags = 0;
	}
}
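
/*
 * For reference, the IDPF_CTLQ_DESC() helper used above resolves a ring
 * index to a descriptor pointer. A minimal sketch of such a macro (the
 * real definition lives in idpf_controlq.h):
 *
 *	#define IDPF_CTLQ_DESC(R, i) \
 *		(&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i]))
 */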

/**
 * idpf_ctlq_shutdown - shutdown the CQ
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for any control queue
 */
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	mutex_lock(&cq->cq_lock);

	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);

	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

	mutex_unlock(&cq->cq_lock);
	mutex_destroy(&cq->cq_lock);
}
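
/*
 * Note: shutdown only quiesces the queue and frees its ring resources;
 * unlinking from hw->cq_list_head and freeing the ctlq_info itself is
 * done by idpf_ctlq_remove() below.
 */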

/**
 * idpf_ctlq_add - add one control queue
 * @hw: pointer to hardware struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue
 * list. The cq parameter will be allocated/initialized and passed back to the
 * caller if no errors occur.
 */
int idpf_ctlq_add(struct idpf_hw *hw,
		  struct idpf_ctlq_create_info *qinfo,
		  struct idpf_ctlq_info **cq_out)
{
	struct idpf_ctlq_info *cq;
	bool is_rxq = false;
	int err;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return -ENOMEM;

	cq->cq_type = qinfo->type;
	cq->q_id = qinfo->id;
	cq->buf_size = qinfo->buf_size;
	cq->ring_size = qinfo->len;

	cq->next_to_use = 0;
	cq->next_to_clean = 0;
	cq->next_to_post = cq->ring_size - 1;

	switch (qinfo->type) {
	case IDPF_CTLQ_TYPE_MAILBOX_RX:
		is_rxq = true;
		fallthrough;
	case IDPF_CTLQ_TYPE_MAILBOX_TX:
		err = idpf_ctlq_alloc_ring_res(hw, cq);
		break;
	default:
		err = -EBADR;
		break;
	}

	if (err)
		goto init_free_q;

	if (is_rxq) {
		idpf_ctlq_init_rxq_bufs(cq);
	} else {
		/* Allocate the array of msg pointers for TX queues */
		cq->bi.tx_msg = kcalloc(qinfo->len,
					sizeof(struct idpf_ctlq_msg *),
					GFP_KERNEL);
		if (!cq->bi.tx_msg) {
			err = -ENOMEM;
			goto init_dealloc_q_mem;
		}
	}

	idpf_ctlq_setup_regs(cq, qinfo);

	idpf_ctlq_init_regs(hw, cq, is_rxq);

	mutex_init(&cq->cq_lock);

	list_add(&cq->cq_list, &hw->cq_list_head);

	*cq_out = cq;

	return 0;

init_dealloc_q_mem:
	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
	kfree(cq);

	return err;
}
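
/*
 * Example (hypothetical values): creating a default-mailbox TX queue. The
 * register offsets normally come from device-specific init code; they are
 * omitted here for brevity:
 *
 *	struct idpf_ctlq_create_info qinfo = {
 *		.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
 *		.id = -1,		// -1 selects the default mailbox
 *		.len = 64,		// ring length (illustrative)
 *		.buf_size = 4096,	// buffer size (illustrative)
 *	};
 *	struct idpf_ctlq_info *cq;
 *
 *	err = idpf_ctlq_add(hw, &qinfo, &cq);
 */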

/**
 * idpf_ctlq_remove - deallocate and remove specified control queue
 * @hw: pointer to hardware struct
 * @cq: pointer to control queue to be removed
 */
void idpf_ctlq_remove(struct idpf_hw *hw,
		      struct idpf_ctlq_info *cq)
{
	list_del(&cq->cq_list);

	idpf_ctlq_shutdown(hw, cq);
	kfree(cq);
}

/**
 * idpf_ctlq_init - main initialization routine for all control queues
 * @hw: pointer to hardware struct
 * @num_q: number of queues to initialize
 * @q_info: array of structs containing info for each queue to be initialized
 *
 * This initializes any number and any type of control queues. This is an all
 * or nothing routine; if one fails, all previously allocated queues will be
 * destroyed. This must be called prior to using the individual add/remove
 * APIs.
 */
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
		   struct idpf_ctlq_create_info *q_info)
{
	struct idpf_ctlq_info *cq, *tmp;
	int err;
	int i;

	INIT_LIST_HEAD(&hw->cq_list_head);

	for (i = 0; i < num_q; i++) {
		struct idpf_ctlq_create_info *qinfo = q_info + i;

		err = idpf_ctlq_add(hw, qinfo, &cq);
		if (err)
			goto init_destroy_qs;
	}

	return 0;

init_destroy_qs:
	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
		idpf_ctlq_remove(hw, cq);

	return err;
}
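
/*
 * Example (hypothetical sizes): the all-or-nothing contract lets a caller
 * hand over both mailbox queues in one array without worrying about
 * partial teardown on failure:
 *
 *	struct idpf_ctlq_create_info ctlq_info[] = {
 *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = -1,
 *		  .len = 64, .buf_size = 4096 },
 *		{ .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = -1,
 *		  .len = 64, .buf_size = 4096 },
 *	};
 *
 *	err = idpf_ctlq_init(hw, ARRAY_SIZE(ctlq_info), ctlq_info);
 */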

/**
 * idpf_ctlq_deinit - destroy all control queues
 * @hw: pointer to hw struct
 */
void idpf_ctlq_deinit(struct idpf_hw *hw)
{
	struct idpf_ctlq_info *cq, *tmp;

	list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
		idpf_ctlq_remove(hw, cq);
}

/**
 * idpf_ctlq_send - send command to Control Queue (CTQ)
 * @hw: pointer to hw struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
 */
int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
	struct idpf_ctlq_desc *desc;
	int num_desc_avail;
	int err = 0;
	int i;

	mutex_lock(&cq->cq_lock);

	/* Ensure there are enough descriptors to send all messages */
	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
		err = -ENOSPC;
		goto err_unlock;
	}

	for (i = 0; i < num_q_msg; i++) {
		struct idpf_ctlq_msg *msg = &q_msg[i];

		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);

		desc->opcode = cpu_to_le16(msg->opcode);
		desc->pfid_vfid = cpu_to_le16(msg->func_id);

		desc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode);
		desc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval);

		desc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) <<
					  IDPF_CTLQ_FLAG_HOST_ID_S);
		if (msg->data_len) {
			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;

			desc->datalen |= cpu_to_le16(msg->data_len);
			desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF);
			desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD);

			/* Update the address values in the desc with the pa
			 * value for respective buffer
			 */
			desc->params.indirect.addr_high =
				cpu_to_le32(upper_32_bits(buff->pa));
			desc->params.indirect.addr_low =
				cpu_to_le32(lower_32_bits(buff->pa));

			memcpy(&desc->params, msg->ctx.indirect.context,
			       IDPF_INDIRECT_CTX_SIZE);
		} else {
			memcpy(&desc->params, msg->ctx.direct,
			       IDPF_DIRECT_CTX_SIZE);
		}

		/* Store buffer info */
		cq->bi.tx_msg[cq->next_to_use] = msg;

		(cq->next_to_use)++;
		if (cq->next_to_use == cq->ring_size)
			cq->next_to_use = 0;
	}

	/* Force memory write to complete before letting hardware
	 * know that there are new descriptors to fetch.
	 */
	dma_wmb();

	wr32(hw, cq->reg.tail, cq->next_to_use);

err_unlock:
	mutex_unlock(&cq->cq_lock);

	return err;
}
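
/*
 * Example (hypothetical): sending one indirect (buffered) message on a
 * send queue handle asq. The mailbox opcode and payload setup are
 * illustrative; real callers obtain the DMA payload from the driver's
 * dma_mem allocator and also fill msg.cookie.mbx.chnl_opcode for
 * virtchnl traffic:
 *
 *	struct idpf_ctlq_msg msg = {};
 *
 *	msg.opcode = idpf_mbq_opc_send_msg_to_cp;	// assumed enum value
 *	msg.data_len = payload->size;
 *	msg.ctx.indirect.payload = payload;	// struct idpf_dma_mem *
 *	err = idpf_ctlq_send(hw, asq, 1, &msg);
 */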

/**
 * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
		       struct idpf_ctlq_msg *msg_status[])
{
	struct idpf_ctlq_desc *desc;
	u16 i, num_to_clean;
	u16 ntc, desc_err;

	if (*clean_count == 0)
		return 0;
	if (*clean_count > cq->ring_size)
		return -EBADR;

	mutex_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *clean_count;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
			break;

		/* Ensure no other fields are read until DD flag is checked */
		dma_rmb();

		/* strip off FW internal code */
		desc_err = le16_to_cpu(desc->ret_val) & 0xff;

		msg_status[i] = cq->bi.tx_msg[ntc];
		msg_status[i]->status = desc_err;

		cq->bi.tx_msg[ntc] = NULL;

		/* Zero out any stale data */
		memset(desc, 0, sizeof(*desc));

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	mutex_unlock(&cq->cq_lock);

	/* Return number of descriptors actually cleaned */
	*clean_count = i;

	return 0;
}
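
/*
 * Example (hypothetical): reclaiming up to 16 completed sends on asq and
 * checking per-message status afterwards:
 *
 *	struct idpf_ctlq_msg *stat[16];
 *	u16 i, nclean = 16;
 *
 *	err = idpf_ctlq_clean_sq(asq, &nclean, stat);
 *	for (i = 0; i < nclean; i++)
 *		if (stat[i]->status)
 *			pr_debug("send %u failed: %u\n", i, stat[i]->status);
 */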

/**
 * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @hw: pointer to hw struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring after
 * consuming them; buff_count will be the number of buffers.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL to support direct commands
 */
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			    u16 *buff_count, struct idpf_dma_mem **buffs)
{
	struct idpf_ctlq_desc *desc;
	u16 ntp = cq->next_to_post;
	bool buffs_avail = false;
	u16 tbp = ntp + 1;
	int i = 0;

	if (*buff_count > cq->ring_size)
		return -EBADR;

	if (*buff_count > 0)
		buffs_avail = true;

	mutex_lock(&cq->cq_lock);

	if (tbp >= cq->ring_size)
		tbp = 0;

	if (tbp == cq->next_to_clean)
		/* Nothing to do */
		goto post_buffs_out;

	/* Post buffers for as many as provided or up until the last one used */
	while (ntp != cq->next_to_clean) {
		desc = IDPF_CTLQ_DESC(cq, ntp);

		if (cq->bi.rx_buff[ntp])
			goto fill_desc;
		if (!buffs_avail) {
			/* If the caller hasn't given us any buffers or
			 * there are none left, search the ring itself
			 * for an available buffer to move to this
			 * entry starting at the next entry in the ring
			 */
			tbp = ntp + 1;

			/* Wrap ring if necessary */
			if (tbp >= cq->ring_size)
				tbp = 0;

			while (tbp != cq->next_to_clean) {
				if (cq->bi.rx_buff[tbp]) {
					cq->bi.rx_buff[ntp] =
						cq->bi.rx_buff[tbp];
					cq->bi.rx_buff[tbp] = NULL;

					/* Found a buffer, no need to
					 * search anymore
					 */
					break;
				}

				/* Wrap ring if necessary */
				tbp++;
				if (tbp >= cq->ring_size)
					tbp = 0;
			}

			if (tbp == cq->next_to_clean)
				goto post_buffs_out;
		} else {
			/* Give back pointer to DMA buffer */
			cq->bi.rx_buff[ntp] = buffs[i];
			i++;

			if (i >= *buff_count)
				buffs_avail = false;
		}

fill_desc:
		desc->flags =
			cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);

		/* Post buffers to descriptor */
		desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
		desc->params.indirect.addr_high =
			cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));
		desc->params.indirect.addr_low =
			cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));

		ntp++;
		if (ntp == cq->ring_size)
			ntp = 0;
	}

post_buffs_out:
	/* Only update tail if buffers were actually posted */
	if (cq->next_to_post != ntp) {
		if (ntp)
			/* Update next_to_post to ntp - 1 since current ntp
			 * will be used as index of next buffer to be posted
			 */
			cq->next_to_post = ntp - 1;
		else
			/* Wrap to end of ring since current ntp is 0 */
			cq->next_to_post = cq->ring_size - 1;

		dma_wmb();

		wr32(hw, cq->reg.tail, cq->next_to_post);
	}

	mutex_unlock(&cq->cq_lock);

	/* return the number of buffers that were not posted */
	*buff_count = *buff_count - i;

	return 0;
}
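
/*
 * Example (hypothetical): once a received payload has been consumed on a
 * receive queue handle arq, the DMA buffer is handed back so hardware can
 * reuse the descriptor. The call is also required with *buff_count == 0
 * (buffs == NULL) after direct commands, per the note above:
 *
 *	u16 nbufs = 1;
 *
 *	err = idpf_ctlq_post_rx_buffs(hw, arq, &nbufs,
 *				      &msg.ctx.indirect.payload);
 *	// on return, nbufs holds how many buffers could not be posted
 */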

/**
 * idpf_ctlq_recv - receive control queue message call back
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
 */
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
		   struct idpf_ctlq_msg *q_msg)
{
	u16 num_to_clean, ntc, flags;
	struct idpf_ctlq_desc *desc;
	int err = 0;
	u16 i;

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

	num_to_clean = *num_q_msg;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		flags = le16_to_cpu(desc->flags);

		if (!(flags & IDPF_CTLQ_FLAG_DD))
			break;

		/* Ensure no other fields are read until DD flag is checked */
		dma_rmb();

		q_msg[i].vmvf_type = (flags &
				      (IDPF_CTLQ_FLAG_FTYPE_VM |
				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
				      IDPF_CTLQ_FLAG_FTYPE_S;

		if (flags & IDPF_CTLQ_FLAG_ERR)
			err = -EBADMSG;

		q_msg[i].cookie.mbx.chnl_opcode =
			le32_to_cpu(desc->v_opcode_dtype);
		q_msg[i].cookie.mbx.chnl_retval =
			le32_to_cpu(desc->v_retval);

		q_msg[i].opcode = le16_to_cpu(desc->opcode);
		q_msg[i].data_len = le16_to_cpu(desc->datalen);
		q_msg[i].status = le16_to_cpu(desc->ret_val);

		if (desc->datalen) {
			memcpy(q_msg[i].ctx.indirect.context,
			       &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);

			/* Assign pointer to dma buffer to ctlq_msg array
			 * to be given to upper layer
			 */
			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

			/* Zero out pointer to DMA buffer info;
			 * will be repopulated by post buffers API
			 */
			cq->bi.rx_buff[ntc] = NULL;
		} else {
			memcpy(q_msg[i].ctx.direct, desc->params.raw,
			       IDPF_DIRECT_CTX_SIZE);
		}

		/* Zero out stale data in descriptor */
		memset(desc, 0, sizeof(struct idpf_ctlq_desc));

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;

	mutex_unlock(&cq->cq_lock);

	*num_q_msg = i;
	if (*num_q_msg == 0)
		err = -ENOMSG;

	return err;
}
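
/*
 * Example (hypothetical): a polling receive path pairing idpf_ctlq_recv()
 * with the buffer-return helper above; process() stands in for caller
 * logic:
 *
 *	struct idpf_ctlq_msg msg;
 *	u16 nmsg = 1;
 *
 *	err = idpf_ctlq_recv(arq, &nmsg, &msg);
 *	if (!err) {
 *		process(&msg);
 *		nmsg = msg.data_len ? 1 : 0;
 *		idpf_ctlq_post_rx_buffs(hw, arq, &nmsg,
 *					&msg.ctx.indirect.payload);
 *	}
 */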