
Searched +full:dma +full:- +full:engine (Results 1 – 25 of 1038) sorted by relevance


/linux-6.14.4/drivers/dma/
Kconfig
1 # SPDX-License-Identifier: GPL-2.0-only
3 # DMA engine configuration
7 bool "DMA Engine support"
10 DMA engines can do asynchronous data transfers without
14 DMA Device drivers supported by the configured arch, it may
18 bool "DMA Engine debugging"
22 say N here. This enables DMA engine core and driver debugging.
25 bool "DMA Engine verbose debugging"
30 the DMA engine core and drivers.
35 comment "DMA Devices"
[all …]
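For orientation, the asynchronous memcpy offload this option describes is driven through the dmaengine client API. A minimal sketch, assuming a channel advertising DMA_MEMCPY is available and that source and destination are already DMA-mapped; demo_async_memcpy is an illustrative name, not kernel code:

#include <linux/dmaengine.h>

/* Minimal sketch: request any channel advertising DMA_MEMCPY and
 * queue one copy. Assumes dst/src are already DMA-mapped addresses.
 */
static int demo_async_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);	/* any capable channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EBUSY;
	}

	dmaengine_submit(tx);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* kick the engine */
	return 0;
}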
/linux-6.14.4/drivers/gpu/drm/nouveau/nvkm/engine/dma/
Kbuild
1 # SPDX-License-Identifier: MIT
2 nvkm-y += nvkm/engine/dma/base.o
3 nvkm-y += nvkm/engine/dma/nv04.o
4 nvkm-y += nvkm/engine/dma/nv50.o
5 nvkm-y += nvkm/engine/dma/gf100.o
6 nvkm-y += nvkm/engine/dma/gf119.o
7 nvkm-y += nvkm/engine/dma/gv100.o
9 nvkm-y += nvkm/engine/dma/user.o
10 nvkm-y += nvkm/engine/dma/usernv04.o
11 nvkm-y += nvkm/engine/dma/usernv50.o
[all …]
base.c
27 #include <engine/fifo.h>
36 struct nvkm_dma *dma = nvkm_dma(oclass->engine); in nvkm_dma_oclass_new() local
40 ret = dma->func->class_new(dma, oclass, data, size, &dmaobj); in nvkm_dma_oclass_new()
42 *pobject = &dmaobj->object; in nvkm_dma_oclass_new()
55 return nvkm_dma_oclass_new(oclass->engine->subdev.device, in nvkm_dma_oclass_fifo_new()
73 sclass->base = oclass[0]; in nvkm_dma_oclass_base_get()
74 sclass->engn = oclass; in nvkm_dma_oclass_base_get()
86 oclass->base = nvkm_dma_sclass[index]; in nvkm_dma_oclass_fifo_get()
93 nvkm_dma_dtor(struct nvkm_engine *engine) in nvkm_dma_dtor() argument
95 return nvkm_dma(engine); in nvkm_dma_dtor()
[all …]
/linux-6.14.4/drivers/crypto/marvell/cesa/
cesa.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
5 * driver supports the TDMA engine on platforms on which it is available.
7 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
15 #include <linux/dma-mapping.h>
38 mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine, in mv_cesa_dequeue_req_locked() argument
43 *backlog = crypto_get_backlog(&engine->queue); in mv_cesa_dequeue_req_locked()
44 req = crypto_dequeue_request(&engine->queue); in mv_cesa_dequeue_req_locked()
52 static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine) in mv_cesa_rearm_engine() argument
58 spin_lock_bh(&engine->lock); in mv_cesa_rearm_engine()
[all …]
cesa.h
1 /* SPDX-License-Identifier: GPL-2.0 */
8 #include <linux/dma-direction.h>
70 * in Errata 4.12. It looks like it was part of an IRQ controller in an FPGA
124 * /-----------\ 0
126 * |-----------| 0x20
128 * |-----------| 0x40
130 * |-----------| 0x40 (inplace)
132 * |-----------| 0x80
133 * | DATA IN | 16 * x (max ->max_req_size)
134 * |-----------| 0x80 (inplace operation)
[all …]
tdma.c
1 // SPDX-License-Identifier: GPL-2.0-only
6 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
19 if (!sgiter->sg) in mv_cesa_req_dma_iter_next_transfer()
22 sgiter->op_offset += len; in mv_cesa_req_dma_iter_next_transfer()
23 sgiter->offset += len; in mv_cesa_req_dma_iter_next_transfer()
24 if (sgiter->offset == sg_dma_len(sgiter->sg)) { in mv_cesa_req_dma_iter_next_transfer()
25 if (sg_is_last(sgiter->sg)) in mv_cesa_req_dma_iter_next_transfer()
27 sgiter->offset = 0; in mv_cesa_req_dma_iter_next_transfer()
28 sgiter->sg = sg_next(sgiter->sg); in mv_cesa_req_dma_iter_next_transfer()
31 if (sgiter->op_offset == iter->op_len) in mv_cesa_req_dma_iter_next_transfer()
[all …]
/linux-6.14.4/Documentation/driver-api/dmaengine/
client.rst
2 DMA Engine API Guide
7 .. note:: For DMA Engine usage in async_tx please see:
8 ``Documentation/crypto/async-tx-api.rst``
11 Below is a guide for device driver writers on how to use the Slave-DMA API of the
12 DMA Engine. This is applicable to slave DMA usage only.
14 DMA usage
17 The slave DMA usage consists of the following steps:
19 - Allocate a DMA slave channel
21 - Set slave and controller specific parameters
23 - Get a descriptor for transaction
[all …]
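The steps quoted above map directly onto dmaengine client calls. A minimal sketch of a device-to-memory (slave) transfer follows; the "rx" channel name, the FIFO address and burst parameters, and the demo_* helpers are illustrative assumptions, not part of the documented API:

#include <linux/dmaengine.h>

/* Condensed from the steps above; illustrative only. buf is expected
 * to be a DMA-mapped address.
 */
static void demo_dma_done(void *param)
{
	/* completion callback: runs when the transfer finishes */
}

static int demo_slave_rx(struct device *dev, dma_addr_t buf, size_t len,
			 phys_addr_t fifo_addr)
{
	struct dma_slave_config cfg = { };
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	/* 1. Allocate a DMA slave channel */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* 2. Set slave and controller specific parameters */
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = fifo_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 16;
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	/* 3. Get a descriptor for the transaction */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EBUSY;
		goto out;
	}
	desc->callback = demo_dma_done;

	/* 4. Submit the transaction and issue pending requests */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
out:
	dma_release_channel(chan);
	return ret;
}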
/linux-6.14.4/Documentation/devicetree/bindings/mips/cavium/
dma-engine.txt
1 * DMA Engine.
3 The Octeon DMA Engine transfers between the Boot Bus and main memory.
4 The DMA Engine will be referred to by phandle by any device that is
8 - compatible: "cavium,octeon-5750-bootbus-dma"
12 - reg: The base address of the DMA Engine's register bank.
14 - interrupts: A single interrupt specifier.
17 dma0: dma-engine@1180000000100 {
18 compatible = "cavium,octeon-5750-bootbus-dma";
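A consumer device resolves that phandle at probe time. A short sketch, assuming the "cavium,dma-engine-handle" property name used by existing Octeon Boot Bus peripherals (e.g. the CF driver); confirm the consumer binding before relying on it:

#include <linux/of.h>

/* Sketch: look up the DMA engine node a Boot Bus peripheral refers to.
 * The property name is an assumption based on existing Octeon users.
 */
static struct device_node *demo_find_dma_engine(struct device_node *np)
{
	return of_parse_phandle(np, "cavium,dma-engine-handle", 0);
}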
/linux-6.14.4/drivers/gpu/drm/radeon/
r600_dma.c
31 * DMA
33 * DMA engine. The programming model is very similar
34 * to the 3D engine (ring buffer, IBs, etc.), but the
35 DMA controller has its own packet format that is
36 different from the PM4 format used by the 3D engine.
43 * r600_dma_get_rptr - get the current read pointer
55 if (rdev->wb.enabled) in r600_dma_get_rptr()
56 rptr = rdev->wb.wb[ring->rptr_offs/4]; in r600_dma_get_rptr()
64 * r600_dma_get_wptr - get the current write pointer
78 * r600_dma_set_wptr - commit the write pointer
[all …]
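The read-pointer logic excerpted above boils down to: prefer the write-back copy in system memory, fall back to MMIO. A hedged paraphrase (driver-internal types; this would only build inside drivers/gpu/drm/radeon with radeon.h and r600d.h in scope):

/* With write-back enabled the hardware mirrors the ring's read
 * pointer into system memory, so polling it costs no MMIO read;
 * otherwise the driver reads the DMA_RB_RPTR register.
 */
static u32 demo_dma_get_rptr(struct radeon_device *rdev,
			     struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs / 4];  /* write-back slot */
	else
		rptr = RREG32(DMA_RB_RPTR);               /* MMIO fallback */

	return (rptr & 0x3fffc) >> 2;  /* strip flag bits, convert to index */
}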
evergreen_dma.c
31 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
36 * Add a DMA fence packet to the ring to write
37 * the fence seq number and DMA trap packet to generate
38 * an interrupt if needed (evergreen-SI).
43 struct radeon_ring *ring = &rdev->ring[fence->ring]; in evergreen_dma_fence_ring_emit()
44 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; in evergreen_dma_fence_ring_emit()
49 radeon_ring_write(ring, fence->seq); in evergreen_dma_fence_ring_emit()
59 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
64 * Schedule an IB in the DMA ring (evergreen).
69 struct radeon_ring *ring = &rdev->ring[ib->ring]; in evergreen_dma_ring_ib_execute()
[all …]
/linux-6.14.4/drivers/soc/sunxi/
sunxi_mbus.c
1 // SPDX-License-Identifier: GPL-2.0
5 #include <linux/dma-map-ops.h>
13 * The display engine virtual devices are not strictly speaking
15 * memory allocations and DMA operations through that device, we
18 "allwinner,sun4i-a10-display-engine",
19 "allwinner,sun5i-a10s-display-engine",
20 "allwinner,sun5i-a13-display-engine",
21 "allwinner,sun6i-a31-display-engine",
22 "allwinner,sun6i-a31s-display-engine",
23 "allwinner,sun7i-a20-display-engine",
[all …]
/linux-6.14.4/drivers/dma/ti/
Kconfig
1 # SPDX-License-Identifier: GPL-2.0-only
3 # Texas Instruments DMA drivers
7 tristate "Texas Instruments CPPI 4.1 DMA support"
11 The Communications Port Programming Interface (CPPI) 4.1 DMA engine
22 Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
23 engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
27 tristate "Texas Instruments sDMA (omap-dma) support"
34 Enable support for the TI sDMA (System DMA or DMA4) controller. This
35 DMA engine is found on OMAP and DRA7xx parts.
47 Enable support for the TI UDMA (Unified DMA) controller. This
[all …]
/linux-6.14.4/arch/powerpc/platforms/powernv/
opal-hmi.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
62 "Hypervisor Resource error - core check stop" }, in print_core_checkstop_reason()
74 if (!hmi_evt->u.xstop_error.xstop_reason) { in print_core_checkstop_reason()
80 be32_to_cpu(hmi_evt->u.xstop_error.u.pir)); in print_core_checkstop_reason()
82 if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) & in print_core_checkstop_reason()
84 printk("%s [Unit: %-3s] %s\n", level, in print_core_checkstop_reason()
94 { NX_CHECKSTOP_SHM_INVAL_STATE_ERR, "DMA & Engine", in print_nx_checkstop_reason()
96 { NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1, "DMA & Engine", in print_nx_checkstop_reason()
97 "DMA invalid state error bit 15" }, in print_nx_checkstop_reason()
98 { NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2, "DMA & Engine", in print_nx_checkstop_reason()
[all …]
/linux-6.14.4/drivers/soc/qcom/
qcom-geni-se.c
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
10 #include <linux/dma-mapping.h>
17 #include <linux/soc/qcom/geni-se.h>
22 * Generic Interface (GENI) Serial Engine (SE) Wrapper driver is introduced
31 GENI-based QUP is a highly flexible and programmable module for supporting
36 * determined by the firmware loaded to the serial engine. Each SE consists
37 * of a DMA Engine and GENI sub modules which enable serial engines to
38 * support FIFO and DMA modes of operation.
41 * +-----------------------------------------+
[all …]
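The FIFO-versus-DMA split mentioned above is a per-transfer decision made by SE client drivers. A minimal sketch loosely modeled on the serial/SPI users of the wrapper; the 64-byte threshold and demo_geni_tx are assumptions for illustration:

#include <linux/soc/qcom/geni-se.h>

/* Sketch of choosing the SE transfer mode: small payloads go through
 * the FIFO, larger ones are handed to the SE's DMA engine.
 */
static int demo_geni_tx(struct geni_se *se, void *buf, size_t len,
			dma_addr_t *iova)
{
	if (len <= 64) {
		/* Small transfer: program the FIFO path */
		geni_se_select_mode(se, GENI_SE_FIFO);
		return 0;	/* caller fills the FIFO from its IRQ handler */
	}

	/* Larger transfer: map the buffer for the SE DMA engine */
	geni_se_select_mode(se, GENI_SE_DMA);
	return geni_se_tx_dma_prep(se, buf, len, iova);
}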
/linux-6.14.4/drivers/gpu/drm/xe/
xe_vm_doc.h
1 /* SPDX-License-Identifier: MIT */
16 * bind engine, and return a handle to the user.
19 * ------------
33 * ----------
35 * DRM_XE_VM_BIND_OP_MAP - Create mapping for a BO
36 * DRM_XE_VM_BIND_OP_UNMAP - Destroy mapping for a BO / userptr
37 * DRM_XE_VM_BIND_OP_MAP_USERPTR - Create mapping for userptr
54 * .. code-block::
56 * bind BO0 0x0-0x1000
62 * bind BO1 0x201000-0x202000
[all …]
/linux-6.14.4/drivers/dma/sh/
Kconfig
1 # SPDX-License-Identifier: GPL-2.0
3 # DMA engine configuration for sh
11 # DMA Engine Helpers
15 bool "Renesas SuperH DMA Engine support"
22 Enable support for the Renesas SuperH DMA controllers.
25 # DMA Controllers
32 Enable support for the Renesas SuperH DMA controllers.
35 tristate "Renesas R-Car Gen{2,3} and RZ/G{1,2} DMA Controller"
39 This driver supports the general purpose DMA controller found in the
40 Renesas R-Car Gen{2,3} and RZ/G{1,2} SoCs.
[all …]
usb-dmac.c
1 // SPDX-License-Identifier: GPL-2.0
3 * Renesas USB DMA Controller Driver
7 * based on rcar-dmac.c
13 #include <linux/dma-mapping.h>
27 #include "../virt-dma.h"
30 * struct usb_dmac_sg - Descriptor for a hardware transfer
40 * struct usb_dmac_desc - USB DMA Transfer Descriptor
41 * @vd: base virtual channel DMA transaction descriptor
42 * @direction: direction of the DMA transfer
66 * struct usb_dmac_chan - USB DMA Controller Channel
[all …]
/linux-6.14.4/Documentation/devicetree/bindings/dma/xilinx/
xilinx_dma.txt
1 Xilinx AXI VDMA engine; it performs transfers between memory and video devices.
6 Xilinx AXI DMA engine; it performs transfers between memory and AXI4 stream
11 Xilinx AXI CDMA engine; it performs transfers between a memory-mapped source
12 address and a memory-mapped destination address.
14 Xilinx AXI MCDMA engine; it performs transfers between memory and AXI4 stream
19 - compatible: Should be one of:
20 "xlnx,axi-vdma-1.00.a"
21 "xlnx,axi-dma-1.00.a"
22 "xlnx,axi-cdma-1.00.a"
23 "xlnx,axi-mcdma-1.00.a"
[all …]
/linux-6.14.4/drivers/dma/amd/
Kconfig
1 # SPDX-License-Identifier: GPL-2.0-only
5 tristate "AMD AE4DMA Engine"
12 provides DMA capabilities to perform high bandwidth memory to
13 memory and IO copy operations. It performs DMA transfer through
14 queue-based descriptor management. This DMA controller is intended
15 to be used with AMD Non-Transparent Bridge devices and not for
16 general purpose peripheral DMA.
19 tristate "AMD PassThru DMA Engine"
25 provides DMA capabilities to perform high bandwidth memory to
26 memory and IO copy operations. It performs DMA transfer through
[all …]
/linux-6.14.4/drivers/ata/
pata_sl82c105.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * pata_sl82c105.c - SL82C105 PATA for new ATA layer
14 * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
45 * sl82c105_pre_reset - probe begin
58 struct ata_port *ap = link->ap; in sl82c105_pre_reset()
59 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in sl82c105_pre_reset()
61 if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) in sl82c105_pre_reset()
62 return -ENOENT; in sl82c105_pre_reset()
68 * sl82c105_configure_piomode - set chip PIO timing
80 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in sl82c105_configure_piomode()
[all …]
/linux-6.14.4/Documentation/devicetree/bindings/gpu/host1x/
nvidia,tegra210-nvenc.yaml
1 # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
3 ---
4 $id: http://devicetree.org/schemas/gpu/host1x/nvidia,tegra210-nvenc.yaml#
5 $schema: http://devicetree.org/meta-schemas/core.yaml#
15 - Thierry Reding <[email protected]>
16 - Mikko Perttunen <[email protected]>
20 pattern: "^nvenc@[0-9a-f]*$"
24 - nvidia,tegra210-nvenc
25 - nvidia,tegra186-nvenc
26 - nvidia,tegra194-nvenc
[all …]
nvidia,tegra210-nvdec.yaml
1 # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
3 ---
4 $id: http://devicetree.org/schemas/gpu/host1x/nvidia,tegra210-nvdec.yaml#
5 $schema: http://devicetree.org/meta-schemas/core.yaml#
15 - Thierry Reding <[email protected]>
16 - Mikko Perttunen <[email protected]>
20 pattern: "^nvdec@[0-9a-f]*$"
24 - nvidia,tegra210-nvdec
25 - nvidia,tegra186-nvdec
26 - nvidia,tegra194-nvdec
[all …]
/linux-6.14.4/Documentation/misc-devices/
mrvl_cn10k_dpi.rst
1 .. SPDX-License-Identifier: GPL-2.0
4 Marvell CN10K DMA packet interface (DPI) driver
10 DPI is a DMA packet interface hardware block in Marvell's CN10K silicon.
12 mailbox logic, and a set of DMA engines & DMA command queues.
15 requests from its VF functions and provisions DMA engine resources to
20 the DMA engines and VF device's DMA command queues. Also, the driver creates
21 the /dev/mrvl-cn10k-dpi node to set DMA engine and PEM (PCIe interface) port
26 DMA operations. Only VF devices are provisioned with DMA capabilities.
38 a PEM port to which DMA engines are wired.
42 ioctl that sets the DMA engine's FIFO sizes & max outstanding load request
[all …]
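From userspace, the configuration path described above is an open() of the /dev/mrvl-cn10k-dpi node followed by an ioctl(). A sketch; the DPI_ENGINE_CFG request name and the struct dpi_engine_cfg layout are assumptions taken from the uapi header include/uapi/misc/mrvl_cn10k_dpi.h and should be verified against the kernel in use:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/mrvl_cn10k_dpi.h>	/* uapi header (assumed installed) */

int main(void)
{
	/* fifo_mask field name and semantics assumed from the uapi header */
	struct dpi_engine_cfg cfg = { .fifo_mask = 0x3f };
	int fd = open("/dev/mrvl-cn10k-dpi", O_RDWR);

	if (fd < 0) {
		perror("open /dev/mrvl-cn10k-dpi");
		return 1;
	}
	if (ioctl(fd, DPI_ENGINE_CFG, &cfg) < 0)	/* program engine FIFO sizes */
		perror("DPI_ENGINE_CFG");
	close(fd);
	return 0;
}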
/linux-6.14.4/Documentation/devicetree/bindings/soc/intel/
intel,hps-copy-engine.yaml
1 # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
4 ---
5 $id: http://devicetree.org/schemas/soc/intel/intel,hps-copy-engine.yaml#
6 $schema: http://devicetree.org/meta-schemas/core.yaml#
8 title: Intel HPS Copy Engine
11 - Matthew Gerlach <[email protected]>
14 The Intel Hard Processor System (HPS) Copy Engine is an IP block used to copy
17 well as a keep-alive indication to the host.
21 const: intel,hps-copy-engine
23 '#dma-cells':
[all …]
/linux-6.14.4/drivers/crypto/aspeed/
aspeed-hace.c
1 // SPDX-License-Identifier: GPL-2.0+
6 #include "aspeed-hace.h"
7 #include <crypto/engine.h>
9 #include <linux/dma-mapping.h>
21 dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
24 dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
31 struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; in aspeed_hace_irq()
32 struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; in aspeed_hace_irq()
41 if (hash_engine->flags & CRYPTO_FLAGS_BUSY) in aspeed_hace_irq()
42 tasklet_schedule(&hash_engine->done_task); in aspeed_hace_irq()
[all …]
