
Searched full:unmapped (Results 1 – 25 of 487) sorted by relevance


/linux-6.14.4/drivers/gpu/drm/msm/
msm_gem_shrinker.c:188 unsigned idx, unmapped = 0; in msm_gem_shrinker_vmap() local
191 for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) { in msm_gem_shrinker_vmap()
192 unmapped += drm_gem_lru_scan(lrus[idx], in msm_gem_shrinker_vmap()
193 vmap_shrink_limit - unmapped, in msm_gem_shrinker_vmap()
198 *(unsigned long *)ptr += unmapped; in msm_gem_shrinker_vmap()
200 if (unmapped > 0) in msm_gem_shrinker_vmap()
201 trace_msm_gem_purge_vmaps(unmapped); in msm_gem_shrinker_vmap()
msm_gpu_trace.h:143 TP_PROTO(u32 unmapped),
144 TP_ARGS(unmapped),
146 __field(u32, unmapped)
149 __entry->unmapped = unmapped;
151 TP_printk("Purging %u vmaps", __entry->unmapped)
msm_iommu.c:98 size_t unmapped, pgsize, count; in msm_iommu_pagetable_unmap() local
102 unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL); in msm_iommu_pagetable_unmap()
103 if (!unmapped) in msm_iommu_pagetable_unmap()
106 iova += unmapped; in msm_iommu_pagetable_unmap()
107 size -= unmapped; in msm_iommu_pagetable_unmap()
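
The msm_iommu.c hit above follows the same unmap-loop shape that recurs in the AMD io_pgtable and ipu3-mmu results further down: unmap one chunk, stop as soon as the page-table ops report zero bytes unmapped, otherwise advance the IOVA and shrink the remaining size by the amount actually unmapped. Below is a minimal, self-contained sketch of that control flow; fake_unmap_pages() is an invented stand-in for the real ops->unmap_pages() callback, not a kernel API.

/*
 * Sketch of the "unmap until no progress" loop seen in
 * msm_iommu_pagetable_unmap().  fake_unmap_pages() is a stub that
 * releases at most one page per call so the loop has something to do.
 */
#include <stdio.h>
#include <stddef.h>

#define FAKE_PGSIZE 4096UL

static size_t fake_unmap_pages(unsigned long iova, size_t size)
{
	(void)iova;
	return size >= FAKE_PGSIZE ? FAKE_PGSIZE : 0;
}

static size_t unmap_range(unsigned long iova, size_t size)
{
	size_t total = 0;

	while (size) {
		size_t unmapped = fake_unmap_pages(iova, size);

		if (!unmapped)		/* no progress: give up, report what we freed */
			break;

		iova += unmapped;	/* advance past the chunk that was unmapped */
		size -= unmapped;
		total += unmapped;
	}

	return total;
}

int main(void)
{
	printf("unmapped %zu bytes\n", unmap_range(0x100000UL, 4 * FAKE_PGSIZE));
	return 0;
}
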
/linux-6.14.4/include/trace/events/
huge_memory.h:59 int referenced, int none_or_zero, int status, int unmapped),
61 TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
70 __field(int, unmapped)
80 __entry->unmapped = unmapped;
83 …TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped
90 __entry->unmapped)
/linux-6.14.4/mm/damon/
vaddr.c:107 * Find three regions separated by two biggest unmapped regions
113 * separated by the two biggest unmapped regions in the space. Please refer to
202 * is actually mapped to the memory and accessed, monitoring the unmapped
207 * with the noise by simply identifying the unmapped areas as a region that
209 * unmapped areas inside will make the adaptive mechanism quite complex. That
210 * said, too huge unmapped areas inside the monitoring target should be removed
215 * between the three regions are the two biggest unmapped areas in the given
217 * end of the mappings and the two biggest unmapped areas of the address space.
226 * region and the stack will be two biggest unmapped regions. Because these
228 * two biggest unmapped regions will be sufficient to make a trade-off.
[all …]
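
The vaddr.c comments excerpted above describe how DAMON seeds its initial monitoring regions: find the two biggest unmapped gaps in the address space (usually the gap above the mmap'd area and the gap below the stack) and use them to cut the space into three regions. A rough, standalone illustration of that selection step follows; struct range and pick_three_regions() are made-up names, and the real code walks the mm's VMAs rather than a flat array.

/*
 * Toy version of the "three regions split by the two biggest unmapped
 * gaps" idea from the DAMON vaddr.c comments.  Assumes at least three
 * sorted, non-overlapping mappings separated by positive gaps.
 */
#include <stdio.h>

struct range { unsigned long start, end; };

static void pick_three_regions(const struct range *maps, int n, struct range out[3])
{
	int big1 = -1, big2 = -1;		/* indices of the two largest gaps */
	unsigned long g1 = 0, g2 = 0;

	for (int i = 0; i + 1 < n; i++) {
		unsigned long gap = maps[i + 1].start - maps[i].end;

		if (gap > g1) {
			g2 = g1; big2 = big1;
			g1 = gap; big1 = i;
		} else if (gap > g2) {
			g2 = gap; big2 = i;
		}
	}

	/* Cut at the two gaps, keeping the cut points in address order. */
	int lo = big1 < big2 ? big1 : big2;
	int hi = big1 < big2 ? big2 : big1;

	out[0] = (struct range){ maps[0].start,      maps[lo].end };
	out[1] = (struct range){ maps[lo + 1].start, maps[hi].end };
	out[2] = (struct range){ maps[hi + 1].start, maps[n - 1].end };
}

int main(void)
{
	/* Toy mappings in the spirit of the vaddr-kunit.h example below. */
	struct range maps[] = { {10, 25}, {200, 210}, {300, 305} };
	struct range out[3];

	pick_three_regions(maps, 3, out);
	for (int i = 0; i < 3; i++)
		printf("region %d: %lu-%lu\n", i, out[i].start, out[i].end);
	return 0;
}
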
/linux-6.14.4/drivers/scsi/lpfc/
lpfc_disc.h:228 * nodes transition from the unmapped to the mapped list.
247 * and put on the unmapped list. For ADISC processing, the node is taken off
248 * the ADISC list and placed on either the mapped or unmapped list (depending
249 * on its previous state). Once on the unmapped list, a PRLI is issued and the
252 * node, the node is taken off the unmapped list. The binding list is checked
254 * assignment is unsuccessful, the node is left on the unmapped list. If
259 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
264 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
281 * unmapped lists.
/linux-6.14.4/drivers/iommu/amd/
io_pgtable_v2.c:292 unsigned long unmapped = 0; in iommu_v2_unmap_pages() local
299 while (unmapped < size) { in iommu_v2_unmap_pages()
302 return unmapped; in iommu_v2_unmap_pages()
307 unmapped += unmap_size; in iommu_v2_unmap_pages()
310 return unmapped; in iommu_v2_unmap_pages()
io_pgtable.c:416 unsigned long long unmapped; in iommu_v1_unmap_pages() local
423 unmapped = 0; in iommu_v1_unmap_pages()
425 while (unmapped < size) { in iommu_v1_unmap_pages()
434 return unmapped; in iommu_v1_unmap_pages()
438 unmapped += unmap_size; in iommu_v1_unmap_pages()
441 return unmapped; in iommu_v1_unmap_pages()
/linux-6.14.4/drivers/vfio/
vfio_iommu_type1.c:969 size_t unmapped = 0; in unmap_unpin_fast() local
973 unmapped = iommu_unmap_fast(domain->domain, *iova, len, in unmap_unpin_fast()
976 if (!unmapped) { in unmap_unpin_fast()
981 entry->len = unmapped; in unmap_unpin_fast()
984 *iova += unmapped; in unmap_unpin_fast()
993 if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) { in unmap_unpin_fast()
999 return unmapped; in unmap_unpin_fast()
1007 size_t unmapped = iommu_unmap(domain->domain, *iova, len); in unmap_unpin_slow() local
1009 if (unmapped) { in unmap_unpin_slow()
1012 unmapped >> PAGE_SHIFT, in unmap_unpin_slow()
[all …]
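
The vfio_iommu_type1.c hits show the split between a deferred-flush fast path (iommu_unmap_fast(), synced later) and a slow path (iommu_unmap()). The point of the fast path is to batch several unmaps behind one TLB sync. The sketch below only mimics that batching control flow; every type and helper in it is invented for illustration, and SYNC_MAX merely stands in for VFIO_IOMMU_TLB_SYNC_MAX.

/*
 * Batch-then-sync idea from unmap_unpin_fast(): queue ranges that the
 * fast path unmapped, and pay for a single TLB sync per batch (or right
 * away if the fast path made no progress).
 */
#include <stdio.h>
#include <stddef.h>

#define SYNC_MAX 4			/* stand-in for VFIO_IOMMU_TLB_SYNC_MAX */

struct pending { unsigned long iova; size_t len; };

static size_t fake_unmap_fast(unsigned long iova, size_t len)
{
	(void)iova;
	return len;			/* pretend every unmap succeeds in full */
}

static void fake_tlb_sync_and_unpin(struct pending *q, unsigned int *n)
{
	(void)q;
	printf("TLB sync, unpinning %u ranges\n", *n);
	*n = 0;				/* one sync covers the entire batch */
}

static size_t unmap_unpin(unsigned long iova, size_t len,
			  struct pending *q, unsigned int *n)
{
	size_t unmapped = fake_unmap_fast(iova, len);

	if (unmapped)
		q[(*n)++] = (struct pending){ iova, unmapped };

	/* Flush when the batch is full, or immediately if nothing was unmapped. */
	if (*n >= SYNC_MAX || !unmapped)
		fake_tlb_sync_and_unpin(q, n);

	return unmapped;
}

int main(void)
{
	struct pending q[SYNC_MAX];
	unsigned int n = 0;

	for (int i = 0; i < 10; i++)
		unmap_unpin(0x1000UL * i, 0x1000, q, &n);
	if (n)
		fake_tlb_sync_and_unpin(q, &n);	/* final sync for the tail of the batch */
	return 0;
}
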
/linux-6.14.4/Documentation/devicetree/bindings/interrupt-controller/
ti,sci-inta.yaml:37 Unmapped events ---->| | umapidx |-------------------------> Globalevents
81 ti,unmapped-event-sources:
86 Array of phandles to DMA controllers where the unmapped events originate.
/linux-6.14.4/drivers/gpu/drm/nouveau/
nouveau_exec.c:33 * mapping. If such memory backed mappings are unmapped the kernel will make
36 * will result in those memory backed mappings being unmapped first.
46 * backed mappings being mapped and unmapped, either within a single or multiple
52 * of the previously unmapped sparse mapping within the same VM_BIND ioctl
54 * range of the previously unmapped sparse mapping within the same VM_BIND
/linux-6.14.4/mm/
migrate_device.c:65 unsigned long addr = start, unmapped = 0; in migrate_vma_collect_pmd() local
261 unmapped++; in migrate_vma_collect_pmd()
273 if (unmapped) in migrate_vma_collect_pmd()
368 unsigned long unmapped = 0; in migrate_device_unmap() local
378 unmapped++; in migrate_device_unmap()
416 unmapped++; in migrate_device_unmap()
435 return unmapped; in migrate_device_unmap()
465 * and unmapped, check whether each page is pinned or not. Pages that aren't
550 * At this point pages are locked and unmapped, and thus they have in migrate_vma_setup()
712 * called if the page could not be unmapped. in __migrate_device_pages()
/linux-6.14.4/drivers/media/platform/chips-media/wave5/
wave5-vdi.c:91 dev_err(vpu_dev->dev, "%s: unable to clear unmapped buffer\n", __func__); in wave5_vdi_clear_memory()
103 dev_err(vpu_dev->dev, "%s: unable to write to unmapped buffer\n", __func__); in wave5_vdi_write_memory()
142 dev_err(vpu_dev->dev, "%s: requested free of unmapped buffer\n", __func__); in wave5_vdi_free_dma_memory()
/linux-6.14.4/include/linux/dma/
k3-psil.h:56 * @flow_start: PKDMA flow range start of mapped channel. Unmapped
58 * @flow_num: PKDMA flow count of mapped channel. Unmapped channels
/linux-6.14.4/Documentation/translations/zh_CN/mm/damon/
design.rst:57 <BIG UNMAPPED REGION 1>
61 <BIG UNMAPPED REGION 2>
/linux-6.14.4/lib/
devres.c:70 * Managed ioremap(). Map is automatically unmapped on driver detach.
85 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
100 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
274 * Managed ioport_map(). Map is automatically unmapped on driver
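
The devres.c hits all make the same point: the managed ioremap variants tie the mapping to the device, so it is unmapped automatically on driver detach and the driver never calls iounmap() itself. A short, hypothetical probe using devm_ioremap() follows (foo_probe() and FOO_REG_ID are invented; a real driver might simply use devm_platform_ioremap_resource()).

/*
 * Illustrative probe with a managed mapping: because the ioremap is
 * device-managed, it is torn down automatically when the driver
 * detaches, so no iounmap() appears in a remove callback.
 */
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#define FOO_REG_ID	0x0	/* hypothetical register offset */

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Managed ioremap(): unmapped automatically on driver detach. */
	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	dev_info(&pdev->dev, "chip id %#x\n", readl(base + FOO_REG_ID));
	return 0;
}
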
/linux-6.14.4/mm/damon/tests/
vaddr-kunit.h:45 * regions should not include the two biggest unmapped areas in the original
48 * Because these two unmapped areas are very huge but obviously never accessed,
53 * unmapped areas. After that, based on the information, it constructs the
61 * and end with 305. The process also has three unmapped areas, 25-200,
63 * unmapped areas, and thus it should be converted to three regions of 10-25,
/linux-6.14.4/drivers/staging/media/ipu3/
ipu3-mmu.c:383 size_t unmapped_page, unmapped = 0; in imgu_mmu_unmap() local
402 while (unmapped < size) { in imgu_mmu_unmap()
407 dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n", in imgu_mmu_unmap()
411 unmapped += unmapped_page; in imgu_mmu_unmap()
416 return unmapped; in imgu_mmu_unmap()
/linux-6.14.4/drivers/iommu/
virtio-iommu.c:342 * On success, returns the number of unmapped bytes
347 size_t unmapped = 0; in viommu_del_mappings() local
367 unmapped += mapping->iova.last - mapping->iova.start + 1; in viommu_del_mappings()
374 return unmapped; in viommu_del_mappings()
863 size_t unmapped; in viommu_unmap_pages() local
868 unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1); in viommu_unmap_pages()
869 if (unmapped < size) in viommu_unmap_pages()
874 return unmapped; in viommu_unmap_pages()
880 .virt_end = cpu_to_le64(iova + unmapped - 1), in viommu_unmap_pages()
884 return ret ? 0 : unmapped; in viommu_unmap_pages()
/linux-6.14.4/Documentation/arch/x86/x86_64/
5level-paging.rst:49 to look for unmapped area by specified address. If it's already
50 occupied, we look for unmapped area in *full* address space, rather than
/linux-6.14.4/arch/hexagon/include/asm/
processor.h:47 * Apparently the convention is that ld.so will ask for "unmapped" private
52 * you have to kick the "unmapped" base requests higher up.
/linux-6.14.4/drivers/irqchip/
irq-ti-sci-inta.c:90 * unmapped event sources.
91 * Unmapped Events are not part of the Global Event Map and
95 * generating Unmapped Event, we must use the INTA's TI-SCI
127 * For devices sending Unmapped Events we must use the INTA's TI-SCI in ti_sci_inta_get_dev_id()
619 count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL); in ti_sci_inta_get_unmapped_sources()
630 of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) { in ti_sci_inta_get_unmapped_sources()
/linux-6.14.4/tools/testing/selftests/powerpc/signal/
sigreturn_vdso.c:106 printf("Unmapped VDSO\n"); in test_sigreturn_vdso()
119 printf("Signal delivered OK with VDSO unmapped\n"); in test_sigreturn_vdso()
/linux-6.14.4/Documentation/networking/device_drivers/ethernet/marvell/
octeontx2.rst:173 - Error due to operation of unmapped PF.
187 - Error due to unmapped slot.
237 - Receive packet on an unmapped PF.
249 - Error due to unmapped slot.
291 Rx on unmapped PF_FUNC
/linux-6.14.4/drivers/iommu/iommufd/
io_pagetable.c:696 unsigned long last, unsigned long *unmapped) in iopt_unmap_iova_range() argument
769 if (unmapped) in iopt_unmap_iova_range()
770 *unmapped = unmapped_bytes; in iopt_unmap_iova_range()
779 * @unmapped: Return number of bytes unmapped
785 unsigned long length, unsigned long *unmapped) in iopt_unmap_iova() argument
795 return iopt_unmap_iova_range(iopt, iova, iova_last, unmapped); in iopt_unmap_iova()
798 int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped) in iopt_unmap_all() argument
802 rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped); in iopt_unmap_all()
920 * will be unmapped from the domain. The domain must already be removed from the
