/linux-6.14.4/drivers/accel/habanalabs/common/command_buffer.c:
    17  static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)  in cb_map_mem() argument
    26          "Mapping a CB to the device's MMU is not supported\n");  in cb_map_mem()
    30      if (cb->is_mmu_mapped)  in cb_map_mem()
    33      cb->roundup_size = roundup(cb->size, page_size);  in cb_map_mem()
    35      cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);  in cb_map_mem()
    36      if (!cb->virtual_addr) {  in cb_map_mem()
    37          dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");  in cb_map_mem()
    43      rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);  in cb_map_mem()
    45          dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);  in cb_map_mem()
    55      cb->is_mmu_mapped = true;  in cb_map_mem()
    [all …]
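A minimal sketch of the genalloc pattern cb_map_mem() builds on: carve a device virtual-address range out of a gen_pool, round allocations up to the pool granularity, and give the range back when mapping fails. The window base, size and names below are invented for illustration, not taken from the habanalabs driver.

    #include <linux/genalloc.h>
    #include <linux/mm.h>
    #include <linux/sizes.h>
    #include <linux/errno.h>

    /* Hypothetical device VA window; a real driver gets these from HW/FW info. */
    #define EXAMPLE_VA_BASE 0x1000000000UL
    #define EXAMPLE_VA_SIZE SZ_256M

    static struct gen_pool *example_va_pool;

    static int example_va_pool_init(void)
    {
            int rc;

            /* page-sized allocation granularity, no NUMA node preference */
            example_va_pool = gen_pool_create(PAGE_SHIFT, -1);
            if (!example_va_pool)
                    return -ENOMEM;

            rc = gen_pool_add(example_va_pool, EXAMPLE_VA_BASE, EXAMPLE_VA_SIZE, -1);
            if (rc) {
                    gen_pool_destroy(example_va_pool);
                    return rc;
            }
            return 0;
    }

    /* Returns 0 when the pool is exhausted, like gen_pool_alloc() itself. */
    static unsigned long example_va_alloc(size_t size)
    {
            return gen_pool_alloc(example_va_pool, PAGE_ALIGN(size));
    }

    static void example_va_free(unsigned long va, size_t size)
    {
            gen_pool_free(example_va_pool, va, PAGE_ALIGN(size));
    }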
/linux-6.14.4/drivers/scsi/myrb.c:
    90  static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)  in myrb_create_mempools() argument
    95      elem_size = cb->host->sg_tablesize * elem_align;  in myrb_create_mempools()
    96      cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,  in myrb_create_mempools()
    98      if (cb->sg_pool == NULL) {  in myrb_create_mempools()
    99          shost_printk(KERN_ERR, cb->host,  in myrb_create_mempools()
   104      cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,  in myrb_create_mempools()
   107      if (!cb->dcdb_pool) {  in myrb_create_mempools()
   108          dma_pool_destroy(cb->sg_pool);  in myrb_create_mempools()
   109          cb->sg_pool = NULL;  in myrb_create_mempools()
   110          shost_printk(KERN_ERR, cb->host,  in myrb_create_mempools()
    [all …]
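myrb_create_mempools() above follows the usual dma_pool shape: one pool per fixed-size DMA object, and on failure tear down whatever was already created. A hedged, generic sketch of that pattern; the pool names, sizes and struct are illustrative, not myrb's.

    #include <linux/dmapool.h>
    #include <linux/pci.h>

    struct example_pools {
            struct dma_pool *sg_pool;
            struct dma_pool *cmd_pool;
    };

    static int example_create_pools(struct pci_dev *pdev, struct example_pools *p)
    {
            /* element size, alignment, no boundary-crossing restriction */
            p->sg_pool = dma_pool_create("example_sg", &pdev->dev, 512, 64, 0);
            if (!p->sg_pool)
                    return -ENOMEM;

            p->cmd_pool = dma_pool_create("example_cmd", &pdev->dev, 256, 64, 0);
            if (!p->cmd_pool) {
                    /* unwind the pool that was created, as myrb does */
                    dma_pool_destroy(p->sg_pool);
                    p->sg_pool = NULL;
                    return -ENOMEM;
            }
            return 0;
    }

    static void example_destroy_pools(struct example_pools *p)
    {
            /* dma_pool_destroy() accepts NULL, so teardown order stays simple */
            dma_pool_destroy(p->cmd_pool);
            dma_pool_destroy(p->sg_pool);
    }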
/linux-6.14.4/drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c:
    22   * @param cb The pointer to the circular buffer.
    27  ia_css_circbuf_read(ia_css_circbuf_t *cb);
    35   * @param cb The pointer to the circular buffer.
    39  static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
    63  ia_css_circbuf_create(ia_css_circbuf_t *cb,  in ia_css_circbuf_create() argument
    71      cb->desc = desc;  in ia_css_circbuf_create()
    73      cb->desc->start = 0;  in ia_css_circbuf_create()
    74      cb->desc->end = 0;  in ia_css_circbuf_create()
    75      cb->desc->step = 0;  in ia_css_circbuf_create()
    77      for (i = 0; i < cb->desc->size; i++)  in ia_css_circbuf_create()
    [all …]
/linux-6.14.4/drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h:
    35   * @param cb The pointer to the circular buffer.
    40      ia_css_circbuf_t *cb,
    47   * @param cb The pointer to the circular buffer.
    50      ia_css_circbuf_t *cb);
    58   * @param cb The pointer to the circular buffer.
    63      ia_css_circbuf_t *cb);
    71   * @param cb The pointer to the circular buffer.
    77      ia_css_circbuf_t *cb,
   131   * @param cb The pointer to the circular buffer.
   138      ia_css_circbuf_t *cb,  in ia_css_circbuf_get_pos_at_offset() argument
    [all …]
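The helpers documented above (for example ia_css_circbuf_get_pos_at_offset()) come down to wrap-around index arithmetic on a ring of desc->size slots. A stand-alone, hedged illustration of that arithmetic, not the atomisp code itself:

    #include <stdint.h>

    /*
     * Position 'offset' steps away from 'pos' in a ring of 'size' slots,
     * wrapping in either direction. Assumes |offset| < size.
     */
    static inline uint32_t ring_pos_at_offset(uint32_t pos, int32_t offset, uint32_t size)
    {
            int32_t dest = (int32_t)pos + offset;

            if (dest < 0)
                    dest += (int32_t)size;
            else if (dest >= (int32_t)size)
                    dest -= (int32_t)size;

            return (uint32_t)dest;
    }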
/linux-6.14.4/drivers/mfd/lm3533-ctrlbank.c:
    28  static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)  in lm3533_ctrlbank_get_reg() argument
    30      return base + cb->id;  in lm3533_ctrlbank_get_reg()
    33  int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)  in lm3533_ctrlbank_enable() argument
    38      dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);  in lm3533_ctrlbank_enable()
    40      mask = 1 << cb->id;  in lm3533_ctrlbank_enable()
    41      ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,  in lm3533_ctrlbank_enable()
    44          dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);  in lm3533_ctrlbank_enable()
    50  int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)  in lm3533_ctrlbank_disable() argument
    55      dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);  in lm3533_ctrlbank_disable()
    57      mask = 1 << cb->id;  in lm3533_ctrlbank_disable()
    [all …]
/linux-6.14.4/block/blk-stat.c:
    53      struct blk_stat_callback *cb;  in blk_stat_add() local
    62      list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {  in blk_stat_add()
    63          if (!blk_stat_is_active(cb))  in blk_stat_add()
    66          bucket = cb->bucket_fn(rq);  in blk_stat_add()
    70          stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];  in blk_stat_add()
    79      struct blk_stat_callback *cb = from_timer(cb, t, timer);  in blk_stat_timer_fn() local
    83      for (bucket = 0; bucket < cb->buckets; bucket++)  in blk_stat_timer_fn()
    84          blk_rq_stat_init(&cb->stat[bucket]);  in blk_stat_timer_fn()
    89          cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);  in blk_stat_timer_fn()
    90          for (bucket = 0; bucket < cb->buckets; bucket++) {  in blk_stat_timer_fn()
    [all …]
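blk_stat_add() above records each sample into a per-CPU bucket, and blk_stat_timer_fn() later folds the per-CPU copies into one result. A simplified, hedged sketch of that accumulate-then-fold shape using plain counters (not the block layer's struct blk_rq_stat or its callback API):

    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/cpumask.h>
    #include <linux/string.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define NBUCKETS 4

    struct example_stats {
            u64 __percpu *cpu_bucket;       /* NBUCKETS counters per CPU */
            u64 bucket[NBUCKETS];           /* folded result */
    };

    static int example_stats_init(struct example_stats *s)
    {
            s->cpu_bucket = __alloc_percpu(NBUCKETS * sizeof(u64), __alignof__(u64));
            return s->cpu_bucket ? 0 : -ENOMEM;
    }

    static void example_stats_add(struct example_stats *s, int bucket)
    {
            int cpu;

            if (bucket < 0 || bucket >= NBUCKETS)
                    return;

            /* fast path: each CPU only touches its own copy, no lock needed */
            cpu = get_cpu();
            per_cpu_ptr(s->cpu_bucket, cpu)[bucket]++;
            put_cpu();
    }

    static void example_stats_fold(struct example_stats *s)
    {
            int cpu, bucket;

            memset(s->bucket, 0, sizeof(s->bucket));
            for_each_online_cpu(cpu) {
                    u64 *cpu_bucket = per_cpu_ptr(s->cpu_bucket, cpu);

                    for (bucket = 0; bucket < NBUCKETS; bucket++)
                            s->bucket[bucket] += cpu_bucket[bucket];
            }
    }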
/linux-6.14.4/drivers/net/mdio/mdio-mux.c:
    40      struct mdio_mux_child_bus *cb = bus->priv;  in mdio_mux_read() local
    41      struct mdio_mux_parent_bus *pb = cb->parent;  in mdio_mux_read()
    45      r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);  in mdio_mux_read()
    49      pb->current_child = cb->bus_number;  in mdio_mux_read()
    61      struct mdio_mux_child_bus *cb = bus->priv;  in mdio_mux_read_c45() local
    62      struct mdio_mux_parent_bus *pb = cb->parent;  in mdio_mux_read_c45()
    66      r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);  in mdio_mux_read_c45()
    70      pb->current_child = cb->bus_number;  in mdio_mux_read_c45()
    85      struct mdio_mux_child_bus *cb = bus->priv;  in mdio_mux_write() local
    86      struct mdio_mux_parent_bus *pb = cb->parent;  in mdio_mux_write()
    [all …]
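All three accessors above first call pb->switch_fn(current_child, desired_child, switch_data) and only record the new current_child when the switch succeeds. A hedged sketch of what such a switch callback might look like; the register layout and struct are invented, only the prototype is taken from the calls shown above.

    #include <linux/io.h>

    struct example_mux {
            void __iomem *select_reg;       /* hypothetical mux-select register */
    };

    /* Return 0 once the desired child bus is routed to the parent MDIO bus. */
    static int example_mdio_mux_switch(int current_child, int desired_child, void *data)
    {
            struct example_mux *mux = data;

            if (current_child == desired_child)
                    return 0;       /* already selected, nothing to do */

            writel(desired_child, mux->select_reg);
            return 0;
    }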
/linux-6.14.4/drivers/irqchip/irq-crossbar.c:
    43  static struct crossbar_device *cb;  variable
    47      writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);  in crossbar_writel()
    52      writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);  in crossbar_writew()
    57      writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);  in crossbar_writeb()
    84      raw_spin_lock(&cb->lock);  in allocate_gic_irq()
    85      for (i = cb->int_max - 1; i >= 0; i--) {  in allocate_gic_irq()
    86          if (cb->irq_map[i] == IRQ_FREE) {  in allocate_gic_irq()
    87              cb->irq_map[i] = hwirq;  in allocate_gic_irq()
    91      raw_spin_unlock(&cb->lock);  in allocate_gic_irq()
   104      cb->irq_map[i] = IRQ_FREE;  in allocate_gic_irq()
    [all …]
/linux-6.14.4/fs/nfsd/nfs4callback.c:
    73   * Encode/decode NFSv4 CB basic data types
   240      dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);  in nfs_cb_stat_to_errno()
   435          const struct nfsd4_callback *cb,  in encode_cb_sequence4args() argument
   438      struct nfsd4_session *session = cb->cb_clp->cl_cb_session;  in encode_cb_sequence4args()
   448      *p++ = cpu_to_be32(session->se_cb_seq_nr[cb->cb_held_slot]); /* csa_sequenceid */  in encode_cb_sequence4args()
   449      *p++ = cpu_to_be32(cb->cb_held_slot); /* csa_slotid */  in encode_cb_sequence4args()
   504          struct nfsd4_callback *cb)  in decode_cb_sequence4resok() argument
   506      struct nfsd4_session *session = cb->cb_clp->cl_cb_session;  in decode_cb_sequence4resok()
   526      if (seqid != session->se_cb_seq_nr[cb->cb_held_slot]) {  in decode_cb_sequence4resok()
   532      if (slotid != cb->cb_held_slot) {  in decode_cb_sequence4resok()
    [all …]
/linux-6.14.4/drivers/misc/mei/interrupt.c:
    31      struct mei_cl_cb *cb, *next;  in mei_irq_compl_handler() local
    34      list_for_each_entry_safe(cb, next, cmpl_list, list) {  in mei_irq_compl_handler()
    35          cl = cb->cl;  in mei_irq_compl_handler()
    36          list_del_init(&cb->list);  in mei_irq_compl_handler()
    39          mei_cl_complete(cl, cb);  in mei_irq_compl_handler()
    99      struct mei_cl_cb *cb;  in mei_cl_irq_read_msg() local
   115      cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);  in mei_cl_irq_read_msg()
   116      if (!cb) {  in mei_cl_irq_read_msg()
   118          cl_err(dev, cl, "pending read cb not found\n");  in mei_cl_irq_read_msg()
   121      cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);  in mei_cl_irq_read_msg()
    [all …]
/linux-6.14.4/drivers/misc/mei/client.c:
   316   * @cb: mei callback struct
   318  void mei_io_cb_free(struct mei_cl_cb *cb)  in mei_io_cb_free() argument
   320      if (cb == NULL)  in mei_io_cb_free()
   323      list_del(&cb->list);  in mei_io_cb_free()
   324      kvfree(cb->buf.data);  in mei_io_cb_free()
   325      kfree(cb->ext_hdr);  in mei_io_cb_free()
   326      kfree(cb);  in mei_io_cb_free()
   332   * @cb: mei callback struct
   337  static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,  in mei_tx_cb_enqueue() argument
   340      list_add_tail(&cb->list, head);  in mei_tx_cb_enqueue()
    [all …]
/linux-6.14.4/fs/dlm/ast.c:
    41  static void dlm_do_callback(struct dlm_callback *cb)  in dlm_do_callback() argument
    43      dlm_run_callback(cb->ls_id, cb->lkb_id, cb->mode, cb->flags,  in dlm_do_callback()
    44                       cb->sb_flags, cb->sb_status, cb->lkb_lksb,  in dlm_do_callback()
    45                       cb->astfn, cb->bastfn, cb->astparam,  in dlm_do_callback()
    46                       cb->res_name, cb->res_length);  in dlm_do_callback()
    47      dlm_free_cb(cb);  in dlm_do_callback()
    52      struct dlm_callback *cb = container_of(work, struct dlm_callback, work);  in dlm_callback_work() local
    54      dlm_do_callback(cb);  in dlm_callback_work()
    68      /* if cb is a bast, it should be skipped if the blocking mode is  in dlm_may_skip_callback()
   123          struct dlm_callback **cb)  in dlm_get_cb() argument
    [all …]
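dlm_callback_work() above is the standard deferred-callback pattern: embed a work_struct in the object, recover the object with container_of() inside the handler, run the callback, then free it. A generic hedged sketch (names are illustrative, not DLM's):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct example_callback {
            struct work_struct work;
            int status;
    };

    static void example_callback_work(struct work_struct *work)
    {
            struct example_callback *cb =
                    container_of(work, struct example_callback, work);

            /* ... deliver cb->status to whoever asked for it ... */
            kfree(cb);
    }

    static int example_queue_callback(int status)
    {
            struct example_callback *cb = kzalloc(sizeof(*cb), GFP_KERNEL);

            if (!cb)
                    return -ENOMEM;

            cb->status = status;
            INIT_WORK(&cb->work, example_callback_work);
            schedule_work(&cb->work);
            return 0;
    }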
/linux-6.14.4/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst:
    14  Cb and Cr components interleaved.
    16  - Fully planar formats use three planes to store the Y, Cb and Cr components
    36  components in the second plane. The Cb and Cr components are interleaved in the
    37  chroma plane, with Cb and Cr always stored in pairs. The chroma order is
    72        - Cb, Cr
    79        - Cr, Cb
    86        - Cb, Cr
    93        - Cr, Cb
   100        - Cb, Cr
   109        - Cb, Cr
    [all …]
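As described above, semi-planar formats keep luma in one plane and interleaved Cb/Cr pairs in a second plane. A hedged sketch of addressing a pixel in one such layout, assuming NV12 (4:2:0 subsampling, Cb before Cr, chroma plane directly after the luma plane and sharing its stride):

    #include <stdint.h>
    #include <stddef.h>

    struct nv12_view {
            uint8_t *base;          /* start of the luma plane */
            size_t stride;          /* bytes per line, shared by both planes here */
            unsigned int height;    /* height in luma lines */
    };

    /* Point 'luma' at the Y sample and 'cbcr' at the CbCr pair covering (x, y). */
    static void nv12_sample(const struct nv12_view *v, unsigned int x, unsigned int y,
                            uint8_t **luma, uint8_t **cbcr)
    {
            uint8_t *chroma_plane = v->base + v->stride * v->height;

            *luma = v->base + v->stride * y + x;
            /* 2x2 subsampling: one CbCr pair per 2x2 block, Cb stored first */
            *cbcr = chroma_plane + v->stride * (y / 2) + (x / 2) * 2;
    }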
/linux-6.14.4/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst:
     9  Similarly to the packed RGB formats, the packed YUV formats store the Y, Cb and
    16  - 'Y', 'Cb' and 'Cr' denote bits of the luma, blue chroma (also known as
    26  full triplet of Y, Cb and Cr values.
    29  component. They are named based on the order of the Y, Cb and Cr components as
    33  Cb\ :sub:`5-0` Cr\ :sub:`4-0`], and stored in memory in two bytes,
    34  [Cb\ :sub:`2-0` Cr\ :sub:`4-0`] followed by [Y'\ :sub:`4-0` Cb\ :sub:`5-3`].
    80        - Cb\ :sub:`3`
    81        - Cb\ :sub:`2`
    82        - Cb\ :sub:`1`
    83        - Cb\ :sub:`0`
    [all …]
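A hedged sketch of the two-byte layout the text above describes for a 16-bit 5-6-5 packed pixel: the word is stored little-endian, so the low byte carries Cr plus the low Cb bits and the high byte carries Y' plus the high Cb bits. Component depths follow the text; this is an illustration, not a reference implementation of any particular V4L2 fourcc.

    #include <stdint.h>

    /*
     * Pack Y' (5 bits), Cb (6 bits), Cr (5 bits) and store the word little-endian:
     * byte 0 = [Cb2-0 Cr4-0], byte 1 = [Y'4-0 Cb5-3], as in the example above.
     */
    static void pack_yuv565_le(uint8_t y5, uint8_t cb6, uint8_t cr5, uint8_t out[2])
    {
            uint16_t v = ((uint16_t)(y5 & 0x1f) << 11) |
                         ((uint16_t)(cb6 & 0x3f) << 5) |
                         (uint16_t)(cr5 & 0x1f);

            out[0] = v & 0xff;      /* Cb[2:0] in bits 7-5, Cr[4:0] in bits 4-0 */
            out[1] = v >> 8;        /* Y'[4:0] in bits 7-3, Cb[5:3] in bits 2-0 */
    }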
/linux-6.14.4/drivers/net/ethernet/netronome/nfp/ccm_mbox.c:
    64      struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;  in nfp_ccm_mbox_msg_init() local
    66      cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;  in nfp_ccm_mbox_msg_init()
    67      cb->err = 0;  in nfp_ccm_mbox_msg_init()
    68      cb->max_len = max_len;  in nfp_ccm_mbox_msg_init()
    69      cb->exp_reply = exp_reply;  in nfp_ccm_mbox_msg_init()
    70      cb->posted = false;  in nfp_ccm_mbox_msg_init()
    75      struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;  in nfp_ccm_mbox_maxlen() local
    77      return cb->max_len;  in nfp_ccm_mbox_maxlen()
    82      struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;  in nfp_ccm_mbox_done() local
    84      return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;  in nfp_ccm_mbox_done()
    [all …]
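The nfp functions above use the common idiom of treating skb->cb as scratch space for a driver-private state struct. A hedged, generic sketch of that idiom; the struct and its fields are illustrative, not the nfp definitions:

    #include <linux/skbuff.h>
    #include <linux/build_bug.h>

    struct example_cmsg_cb {
            unsigned int state;
            unsigned int max_len;
            int err;
            bool posted;
    };

    static struct example_cmsg_cb *example_skb_cb(struct sk_buff *skb)
    {
            /* skb->cb is only 48 bytes; fail the build if the state outgrows it */
            BUILD_BUG_ON(sizeof(struct example_cmsg_cb) >
                         sizeof_field(struct sk_buff, cb));
            return (struct example_cmsg_cb *)skb->cb;
    }

    static void example_cmsg_init(struct sk_buff *skb, unsigned int max_len)
    {
            struct example_cmsg_cb *cb = example_skb_cb(skb);

            cb->state = 0;
            cb->err = 0;
            cb->max_len = max_len;
            cb->posted = false;
    }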
/linux-6.14.4/tools/testing/selftests/bpf/verifier/ctx_skb.c:
   392      "check cb access: byte",
   396              offsetof(struct __sk_buff, cb[0])),
   398              offsetof(struct __sk_buff, cb[0]) + 1),
   400              offsetof(struct __sk_buff, cb[0]) + 2),
   402              offsetof(struct __sk_buff, cb[0]) + 3),
   404              offsetof(struct __sk_buff, cb[1])),
   406              offsetof(struct __sk_buff, cb[1]) + 1),
   408              offsetof(struct __sk_buff, cb[1]) + 2),
   410              offsetof(struct __sk_buff, cb[1]) + 3),
   412              offsetof(struct __sk_buff, cb[2])),
    [all …]
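The verifier cases above check that programs may read and write the cb[] scratch words of struct __sk_buff at byte granularity. A hedged sketch of how a tc program typically uses one of those words (the section name and the chosen values are illustrative):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int mark_via_cb(struct __sk_buff *skb)
    {
            /* cb[0]..cb[4] are five 32-bit scratch words carried with the skb */
            skb->cb[0] = 0xcafe;

            if (skb->cb[0] == 0xcafe)
                    return 0;       /* TC_ACT_OK */

            return 2;               /* TC_ACT_SHOT */
    }

    char _license[] SEC("license") = "GPL";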
/linux-6.14.4/drivers/dma/dmaengine.h:
   107   * @cb: temp struct to hold the callback info
   109   * Fill the passed in cb struct with what's available in the passed in
   115              struct dmaengine_desc_callback *cb)  in dmaengine_desc_get_callback() argument
   117      cb->callback = tx->callback;  in dmaengine_desc_get_callback()
   118      cb->callback_result = tx->callback_result;  in dmaengine_desc_get_callback()
   119      cb->callback_param = tx->callback_param;  in dmaengine_desc_get_callback()
   123   * dmaengine_desc_callback_invoke - call the callback function in cb struct
   124   * @cb: temp struct that is holding the callback info
   127   * Call the callback function provided in the cb struct with the parameter
   128   * in the cb struct.
    [all …]
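The helpers documented above let a DMA driver snapshot a descriptor's callback before the descriptor can be recycled and invoke it afterwards. A hedged sketch of how a completion path might use them; the surrounding driver structure (and calling this outside the driver's usual locking) is simplified for illustration:

    #include <linux/dmaengine.h>
    #include "dmaengine.h"  /* the drivers/dma/dmaengine.h excerpted above */

    static void example_complete_desc(struct dma_async_tx_descriptor *tx)
    {
            struct dmaengine_desc_callback cb;
            struct dmaengine_result res = {
                    .result = DMA_TRANS_NOERROR,
                    .residue = 0,
            };

            /* snapshot callback/param while tx is still valid */
            dmaengine_desc_get_callback(tx, &cb);

            dma_cookie_complete(tx);
            /* the descriptor may be reused from here on */

            /* call back into the client using only the captured info */
            dmaengine_desc_callback_invoke(&cb, &res);
    }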
/linux-6.14.4/include/trace/events/notifier.h:
    12      TP_PROTO(void *cb),
    14      TP_ARGS(cb),
    17          __field(void *, cb)
    21          __entry->cb = cb;
    24      TP_printk("%ps", __entry->cb)
    30   * @cb: callback pointer
    35      TP_PROTO(void *cb),
    37      TP_ARGS(cb)
    43   * @cb: callback pointer
    48      TP_PROTO(void *cb),
    [all …]
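The class above records nothing but the callback pointer and prints it with %ps, so the trace shows the handler's symbol name. Individual events are then stamped out from the class; a hedged sketch of such a definition, where both the class name (notifier_info, as in the mainline header) and the event name are assumptions rather than part of the excerpt:

    /* In the same trace header, after the DECLARE_EVENT_CLASS() shown above. */
    DEFINE_EVENT(notifier_info, notifier_example_event,

            TP_PROTO(void *cb),

            TP_ARGS(cb)
    );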
/linux-6.14.4/drivers/misc/sgi-gru/gru_instructions.h:
    22  extern int gru_check_status_proc(void *cb);
    23  extern int gru_wait_proc(void *cb);
    24  extern void gru_wait_abort_proc(void *cb);
    52  /* CB substatus bitmasks */
    56  /* CB substatus message queue values (low 3 bits of substatus) */
    69      unsigned long cb;  member
   349  static inline void gru_vload_phys(void *cb, unsigned long gpa,  in gru_vload_phys() argument
   352      struct gru_instruction *ins = (struct gru_instruction *)cb;  in gru_vload_phys()
   361  static inline void gru_vstore_phys(void *cb, unsigned long gpa,  in gru_vstore_phys() argument
   364      struct gru_instruction *ins = (struct gru_instruction *)cb;  in gru_vstore_phys()
    [all …]
/linux-6.14.4/drivers/misc/sgi-gru/grukservices.c:
    61   * - 1 CB & a few DSRs that are reserved for each cpu on the blade.
   255  static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)  in gru_get_cpu_resources() argument
   263      *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;  in gru_get_cpu_resources()
   271  static void gru_free_cpu_resources(void *cb, void *dsr)  in gru_free_cpu_resources() argument
   354   *  cb - pointer to first CBR
   357  void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)  in gru_lock_async_resource() argument
   365      if (cb)  in gru_lock_async_resource()
   366          *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;  in gru_lock_async_resource()
   385  int gru_get_cb_exception_detail(void *cb,  in gru_get_cb_exception_detail() argument
   394       * Locate kgts for cb. This algorithm is SLOW but  in gru_get_cb_exception_detail()
    [all …]
/linux-6.14.4/arch/s390/kernel/runtime_instr.c:
    53  static void init_runtime_instr_cb(struct runtime_instr_cb *cb)  in init_runtime_instr_cb() argument
    55      cb->rla = 0xfff;  in init_runtime_instr_cb()
    56      cb->s = 1;  in init_runtime_instr_cb()
    57      cb->k = 1;  in init_runtime_instr_cb()
    58      cb->ps = 1;  in init_runtime_instr_cb()
    59      cb->pc = 1;  in init_runtime_instr_cb()
    60      cb->key = PAGE_DEFAULT_KEY >> 4;  in init_runtime_instr_cb()
    61      cb->v = 1;  in init_runtime_instr_cb()
    72      struct runtime_instr_cb *cb;  in SYSCALL_DEFINE2() local
    86      cb = kzalloc(sizeof(*cb), GFP_KERNEL);  in SYSCALL_DEFINE2()
    [all …]
/linux-6.14.4/drivers/dma-buf/st-dma-fence.c:
    37      struct dma_fence_cb cb;  member
    41  static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)  in mock_wakeup() argument
    43      wake_up_process(container_of(cb, struct wait_cb, cb)->task);  in mock_wakeup()
    49      struct wait_cb cb = { .task = current };  in mock_wait() local
    51      if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))  in mock_wait()
    67      if (!dma_fence_remove_callback(f, &cb.cb))  in mock_wait()
   151      struct dma_fence_cb cb;  member
   155  static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)  in simple_callback() argument
   157      smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);  in simple_callback()
   162      struct simple_cb cb = {};  in test_add_callback() local
    [all …]
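mock_wait() above shows the standard dma_fence callback pattern: embed a struct dma_fence_cb in your own context, register it with dma_fence_add_callback(), treat a non-zero return as "already signalled", and remove the callback if you give up waiting. A hedged, generic sketch of the same pattern built around a completion:

    #include <linux/dma-fence.h>
    #include <linux/completion.h>

    struct example_waiter {
            struct dma_fence_cb cb;
            struct completion done;
    };

    static void example_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
    {
            struct example_waiter *w = container_of(cb, struct example_waiter, cb);

            complete(&w->done);
    }

    /* Returns true if the fence signalled within 'timeout' jiffies. */
    static bool example_wait_on_fence(struct dma_fence *fence, unsigned long timeout)
    {
            struct example_waiter w;

            init_completion(&w.done);

            /* a non-zero return means the fence has already signalled */
            if (dma_fence_add_callback(fence, &w.cb, example_fence_cb))
                    return true;

            if (wait_for_completion_timeout(&w.done, timeout))
                    return true;

            /* timed out: detach before 'w' goes out of scope */
            dma_fence_remove_callback(fence, &w.cb);
            return false;
    }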
/linux-6.14.4/drivers/net/ethernet/brocade/bna/bfi_reg.h:
    19  #define HOSTFN0_INT_STATUS       0x00014000  /* cb/ct */
    20  #define HOSTFN1_INT_STATUS       0x00014100  /* cb/ct */
    23  #define HOSTFN0_INT_MSK          0x00014004  /* cb/ct */
    24  #define HOSTFN1_INT_MSK          0x00014104  /* cb/ct */
    28  #define HOST_PAGE_NUM_FN0        0x00014008  /* cb/ct */
    29  #define HOST_PAGE_NUM_FN1        0x00014108  /* cb/ct */
    33  #define APP_PLL_LCLK_CTL_REG     0x00014204  /* cb/ct */
    59  #define APP_PLL_SCLK_CTL_REG     0x00014208  /* cb/ct */
    89  #define HOST_SEM0_REG            0x00014230  /* cb/ct */
    90  #define HOST_SEM1_REG            0x00014234  /* cb/ct */
    [all …]
/linux-6.14.4/drivers/scsi/bfa/bfi_reg.h:
    18  #define HOSTFN0_INT_STATUS       0x00014000  /* cb/ct */
    19  #define HOSTFN1_INT_STATUS       0x00014100  /* cb/ct */
    22  #define HOSTFN0_INT_MSK          0x00014004  /* cb/ct */
    23  #define HOSTFN1_INT_MSK          0x00014104  /* cb/ct */
    27  #define HOST_PAGE_NUM_FN0        0x00014008  /* cb/ct */
    28  #define HOST_PAGE_NUM_FN1        0x00014108  /* cb/ct */
    32  #define APP_PLL_LCLK_CTL_REG     0x00014204  /* cb/ct */
    58  #define APP_PLL_SCLK_CTL_REG     0x00014208  /* cb/ct */
    88  #define HOST_SEM0_REG            0x00014230  /* cb/ct */
    89  #define HOST_SEM1_REG            0x00014234  /* cb/ct */
    [all …]
/linux-6.14.4/drivers/gpu/drm/i915/i915_sw_fence.c:
   421      struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);  in dma_i915_sw_fence_wake() local
   423      i915_sw_fence_set_error_once(cb->fence, dma->error);  in dma_i915_sw_fence_wake()
   424      i915_sw_fence_complete(cb->fence);  in dma_i915_sw_fence_wake()
   425      kfree(cb);  in dma_i915_sw_fence_wake()
   430      struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);  in timer_i915_sw_fence_wake() local
   433      fence = xchg(&cb->base.fence, NULL);  in timer_i915_sw_fence_wake()
   438          cb->dma->ops->get_driver_name(cb->dma),  in timer_i915_sw_fence_wake()
   439          cb->dma->ops->get_timeline_name(cb->dma),  in timer_i915_sw_fence_wake()
   440          cb->dma->seqno,  in timer_i915_sw_fence_wake()
   450      struct i915_sw_dma_fence_cb_timer *cb =  in dma_i915_sw_fence_wake_timer() local
    [all …]