Lines Matching +full:iommu +full:- +full:secure +full:- +full:id

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
4 #include <linux/iommu.h>
9 #include "../iommu-priv.h"
25 WARN_ON(igroup->hwpt || !list_empty(&igroup->device_list)); in iommufd_group_release()
27 xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group), igroup, in iommufd_group_release()
29 iommu_group_put(igroup->group); in iommufd_group_release()
30 mutex_destroy(&igroup->lock); in iommufd_group_release()
36 kref_put(&group->ref, iommufd_group_release); in iommufd_put_group()
45 * group IDs cannot be re-used until the group is put back which does in iommufd_group_try_get()
48 if (WARN_ON(igroup->group != group)) in iommufd_group_try_get()
50 return kref_get_unless_zero(&igroup->ref); in iommufd_group_try_get()
55 * parallel xarray indexed by iommu_group id to hold this instead of putting it
67 unsigned int id; in iommufd_get_group() local
71 return ERR_PTR(-ENODEV); in iommufd_get_group()
73 id = iommu_group_id(group); in iommufd_get_group()
75 xa_lock(&ictx->groups); in iommufd_get_group()
76 igroup = xa_load(&ictx->groups, id); in iommufd_get_group()
78 xa_unlock(&ictx->groups); in iommufd_get_group()
82 xa_unlock(&ictx->groups); in iommufd_get_group()
87 return ERR_PTR(-ENOMEM); in iommufd_get_group()
90 kref_init(&new_igroup->ref); in iommufd_get_group()
91 mutex_init(&new_igroup->lock); in iommufd_get_group()
92 INIT_LIST_HEAD(&new_igroup->device_list); in iommufd_get_group()
93 new_igroup->sw_msi_start = PHYS_ADDR_MAX; in iommufd_get_group()
95 new_igroup->group = group; in iommufd_get_group()
101 new_igroup->ictx = ictx; in iommufd_get_group()
108 xa_lock(&ictx->groups); in iommufd_get_group()
110 igroup = __xa_cmpxchg(&ictx->groups, id, cur_igroup, new_igroup, in iommufd_get_group()
113 xa_unlock(&ictx->groups); in iommufd_get_group()
120 xa_unlock(&ictx->groups); in iommufd_get_group()
126 xa_unlock(&ictx->groups); in iommufd_get_group()
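
The lookup above is a standard xarray get-or-create: check the slot under the lock, take a reference only if the existing entry is still live, and otherwise install a freshly allocated entry with __xa_cmpxchg() so that a racing release (which cmpxchgs the slot back to NULL) fails harmlessly. A minimal self-contained sketch of the same pattern, under stated assumptions; struct obj and obj_get_or_create() are hypothetical names, not part of iommufd:

#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct obj {
        struct kref ref;
        unsigned long id;
};

static struct obj *obj_get_or_create(struct xarray *xa, unsigned long id)
{
        struct obj *new, *cur;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);
        kref_init(&new->ref);
        new->id = id;

        xa_lock(xa);
        for (;;) {
                struct obj *old;

                cur = xa_load(xa, id);
                if (cur && kref_get_unless_zero(&cur->ref)) {
                        /* A live entry was installed by someone else. */
                        xa_unlock(xa);
                        kfree(new);
                        return cur;
                }
                /*
                 * Slot is empty or holds a dying (zero-ref) entry; try to
                 * install new. The dying entry's release cmpxchgs the slot
                 * against itself and harmlessly fails once we succeed.
                 */
                old = __xa_cmpxchg(xa, id, cur, new, GFP_KERNEL);
                if (old == cur)
                        break;
                if (xa_is_err(old)) {
                        xa_unlock(xa);
                        kfree(new);
                        return ERR_PTR(xa_err(old));
                }
                /* Lost a race; re-evaluate the new occupant. */
        }
        xa_unlock(xa);
        return new;
}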
139 iommu_device_release_dma_owner(idev->dev); in iommufd_device_destroy()
140 iommufd_put_group(idev->igroup); in iommufd_device_destroy()
141 if (!iommufd_selftest_is_mock_dev(idev->dev)) in iommufd_device_destroy()
142 iommufd_ctx_put(idev->ictx); in iommufd_device_destroy()
146 * iommufd_device_bind - Bind a physical device to an iommu fd
149 * @id: Output ID number to return to userspace for this device
162 struct device *dev, u32 *id) in iommufd_device_bind() argument
173 return ERR_PTR(-EINVAL); in iommufd_device_bind()
181 * allowed if the module parameter is set. Secure/Isolated means that a in iommufd_device_bind()
186 !iommu_group_has_isolated_msi(igroup->group)) { in iommufd_device_bind()
188 rc = -EPERM; in iommufd_device_bind()
194 "MSI interrupts are not secure, they cannot be isolated by the platform. " in iommufd_device_bind()
208 idev->ictx = ictx; in iommufd_device_bind()
211 idev->dev = dev; in iommufd_device_bind()
212 idev->enforce_cache_coherency = in iommufd_device_bind()
215 refcount_inc(&idev->obj.users); in iommufd_device_bind()
217 idev->igroup = igroup; in iommufd_device_bind()
218 mutex_init(&idev->iopf_lock); in iommufd_device_bind()
226 iommufd_object_finalize(ictx, &idev->obj); in iommufd_device_bind()
227 *id = idev->obj.id; in iommufd_device_bind()
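
For context, a hedged sketch of how a kernel consumer (VFIO is the in-tree caller) would use the bind/unbind pair; example_bind() is a hypothetical name:

#include <linux/device.h>
#include <linux/iommufd.h>

static int example_bind(struct iommufd_ctx *ictx, struct device *dev)
{
        struct iommufd_device *idev;
        u32 dev_id;

        idev = iommufd_device_bind(ictx, dev, &dev_id);
        if (IS_ERR(idev))
                return PTR_ERR(idev);

        /* dev_id is the IOMMUFD_OBJ_DEVICE ID to report to userspace. */
        dev_info(dev, "bound as iommufd device %u\n", dev_id);

        /* ... attach and use the device, then pair with ... */
        iommufd_device_unbind(idev);
        return 0;
}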
239 * iommufd_ctx_has_group - True if any device within the group is bound
255 xa_lock(&ictx->objects); in iommufd_ctx_has_group()
256 xa_for_each(&ictx->objects, index, obj) { in iommufd_ctx_has_group()
257 if (obj->type == IOMMUFD_OBJ_DEVICE && in iommufd_ctx_has_group()
259 ->igroup->group == group) { in iommufd_ctx_has_group()
260 xa_unlock(&ictx->objects); in iommufd_ctx_has_group()
264 xa_unlock(&ictx->objects); in iommufd_ctx_has_group()
270 * iommufd_device_unbind - Undo iommufd_device_bind()
280 iommufd_object_destroy_user(idev->ictx, &idev->obj); in iommufd_device_unbind()
286 return idev->ictx; in iommufd_device_to_ictx()
292 return idev->obj.id; in iommufd_device_to_id()
299 phys_addr_t sw_msi_start = igroup->sw_msi_start; in iommufd_group_setup_msi()
303 * If the IOMMU driver gives an IOMMU_RESV_SW_MSI then it is asking us to in iommufd_group_setup_msi()
315 if (sw_msi_start != PHYS_ADDR_MAX && !hwpt_paging->msi_cookie) { in iommufd_group_setup_msi()
316 rc = iommu_get_msi_cookie(hwpt_paging->common.domain, in iommufd_group_setup_msi()
323 * it returns -EBUSY on later calls. in iommufd_group_setup_msi()
325 hwpt_paging->msi_cookie = true; in iommufd_group_setup_msi()
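
The sw_msi_start consumed here originates from an IOMMU_RESV_SW_MSI reserved region reported by the IOMMU driver. A hedged sketch of how a driver advertises such a window; the base/length values and example_ names are illustrative, and the prot flags mirror what the Arm SMMU drivers use:

#include <linux/iommu.h>

#define EXAMPLE_MSI_IOVA_BASE   0x08000000
#define EXAMPLE_MSI_IOVA_LENGTH 0x00100000

static void example_get_resv_regions(struct device *dev,
                                     struct list_head *head)
{
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        struct iommu_resv_region *region;

        region = iommu_alloc_resv_region(EXAMPLE_MSI_IOVA_BASE,
                                         EXAMPLE_MSI_IOVA_LENGTH, prot,
                                         IOMMU_RESV_SW_MSI, GFP_KERNEL);
        if (region)
                list_add_tail(&region->list, head);
}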
336 lockdep_assert_held(&idev->igroup->lock); in iommufd_device_attach_reserved_iova()
338 rc = iopt_table_enforce_dev_resv_regions(&hwpt_paging->ioas->iopt, in iommufd_device_attach_reserved_iova()
339 idev->dev, in iommufd_device_attach_reserved_iova()
340 &idev->igroup->sw_msi_start); in iommufd_device_attach_reserved_iova()
344 if (list_empty(&idev->igroup->device_list)) { in iommufd_device_attach_reserved_iova()
345 rc = iommufd_group_setup_msi(idev->igroup, hwpt_paging); in iommufd_device_attach_reserved_iova()
347 iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, in iommufd_device_attach_reserved_iova()
348 idev->dev); in iommufd_device_attach_reserved_iova()
357 /* Check if idev is attached to igroup->hwpt */
362 list_for_each_entry(cur, &idev->igroup->device_list, group_item) in iommufd_device_is_attached()
374 lockdep_assert_held(&idev->igroup->lock); in iommufd_hwpt_attach_device()
378 return -ENOMEM; in iommufd_hwpt_attach_device()
380 if (hwpt->fault) { in iommufd_hwpt_attach_device()
386 handle->idev = idev; in iommufd_hwpt_attach_device()
387 rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group, in iommufd_hwpt_attach_device()
388 &handle->handle); in iommufd_hwpt_attach_device()
395 if (hwpt->fault) in iommufd_hwpt_attach_device()
407 lockdep_assert_held(&idev->igroup->lock); in iommufd_device_get_attach_handle()
410 iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0); in iommufd_device_get_attach_handle()
422 iommu_detach_group_handle(hwpt->domain, idev->igroup->group); in iommufd_hwpt_detach_device()
423 if (hwpt->fault) { in iommufd_hwpt_detach_device()
440 return -ENOMEM; in iommufd_hwpt_replace_device()
442 if (hwpt->fault && !old->fault) { in iommufd_hwpt_replace_device()
448 handle->idev = idev; in iommufd_hwpt_replace_device()
449 rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain, in iommufd_hwpt_replace_device()
450 &handle->handle); in iommufd_hwpt_replace_device()
454 if (old->fault) { in iommufd_hwpt_replace_device()
456 if (!hwpt->fault) in iommufd_hwpt_replace_device()
464 if (hwpt->fault && !old->fault) in iommufd_hwpt_replace_device()
477 mutex_lock(&idev->igroup->lock); in iommufd_hw_pagetable_attach()
479 if (idev->igroup->hwpt != NULL && idev->igroup->hwpt != hwpt) { in iommufd_hw_pagetable_attach()
480 rc = -EINVAL; in iommufd_hw_pagetable_attach()
493 * should attach every device individually to the hwpt as the per-device in iommufd_hw_pagetable_attach()
497 if (list_empty(&idev->igroup->device_list)) { in iommufd_hw_pagetable_attach()
501 idev->igroup->hwpt = hwpt; in iommufd_hw_pagetable_attach()
503 refcount_inc(&hwpt->obj.users); in iommufd_hw_pagetable_attach()
504 list_add_tail(&idev->group_item, &idev->igroup->device_list); in iommufd_hw_pagetable_attach()
505 mutex_unlock(&idev->igroup->lock); in iommufd_hw_pagetable_attach()
509 iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev); in iommufd_hw_pagetable_attach()
511 mutex_unlock(&idev->igroup->lock); in iommufd_hw_pagetable_attach()
518 struct iommufd_hw_pagetable *hwpt = idev->igroup->hwpt; in iommufd_hw_pagetable_detach()
521 mutex_lock(&idev->igroup->lock); in iommufd_hw_pagetable_detach()
522 list_del(&idev->group_item); in iommufd_hw_pagetable_detach()
523 if (list_empty(&idev->igroup->device_list)) { in iommufd_hw_pagetable_detach()
525 idev->igroup->hwpt = NULL; in iommufd_hw_pagetable_detach()
528 iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev); in iommufd_hw_pagetable_detach()
529 mutex_unlock(&idev->igroup->lock); in iommufd_hw_pagetable_detach()
553 lockdep_assert_held(&igroup->lock); in iommufd_group_remove_reserved_iova()
555 list_for_each_entry(cur, &igroup->device_list, group_item) in iommufd_group_remove_reserved_iova()
556 iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, cur->dev); in iommufd_group_remove_reserved_iova()
567 lockdep_assert_held(&igroup->lock); in iommufd_group_do_replace_reserved_iova()
569 old_hwpt_paging = find_hwpt_paging(igroup->hwpt); in iommufd_group_do_replace_reserved_iova()
570 if (!old_hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas) { in iommufd_group_do_replace_reserved_iova()
571 list_for_each_entry(cur, &igroup->device_list, group_item) { in iommufd_group_do_replace_reserved_iova()
573 &hwpt_paging->ioas->iopt, cur->dev, NULL); in iommufd_group_do_replace_reserved_iova()
595 struct iommufd_group *igroup = idev->igroup; in iommufd_device_do_replace()
600 mutex_lock(&idev->igroup->lock); in iommufd_device_do_replace()
602 if (igroup->hwpt == NULL) { in iommufd_device_do_replace()
603 rc = -EINVAL; in iommufd_device_do_replace()
608 rc = -EINVAL; in iommufd_device_do_replace()
612 if (hwpt == igroup->hwpt) { in iommufd_device_do_replace()
613 mutex_unlock(&idev->igroup->lock); in iommufd_device_do_replace()
617 old_hwpt = igroup->hwpt; in iommufd_device_do_replace()
630 (!hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas)) in iommufd_device_do_replace()
633 igroup->hwpt = hwpt; in iommufd_device_do_replace()
635 num_devices = list_count_nodes(&igroup->device_list); in iommufd_device_do_replace()
640 refcount_add(num_devices, &hwpt->obj.users); in iommufd_device_do_replace()
642 WARN_ON(refcount_sub_and_test(num_devices - 1, in iommufd_device_do_replace()
643 &old_hwpt->obj.users)); in iommufd_device_do_replace()
644 mutex_unlock(&idev->igroup->lock); in iommufd_device_do_replace()
652 mutex_unlock(&idev->igroup->lock); in iommufd_device_do_replace()
686 mutex_lock(&ioas->mutex); in iommufd_device_auto_get_domain()
687 list_for_each_entry(hwpt_paging, &ioas->hwpt_list, hwpt_item) { in iommufd_device_auto_get_domain()
688 if (!hwpt_paging->auto_domain) in iommufd_device_auto_get_domain()
691 hwpt = &hwpt_paging->common; in iommufd_device_auto_get_domain()
692 if (!iommufd_lock_obj(&hwpt->obj)) in iommufd_device_auto_get_domain()
696 iommufd_put_object(idev->ictx, &hwpt->obj); in iommufd_device_auto_get_domain()
698 * -EINVAL means the domain is incompatible with the in iommufd_device_auto_get_domain()
703 if (PTR_ERR(destroy_hwpt) == -EINVAL) in iommufd_device_auto_get_domain()
707 *pt_id = hwpt->obj.id; in iommufd_device_auto_get_domain()
708 iommufd_put_object(idev->ictx, &hwpt->obj); in iommufd_device_auto_get_domain()
712 hwpt_paging = iommufd_hwpt_paging_alloc(idev->ictx, ioas, idev, 0, in iommufd_device_auto_get_domain()
718 hwpt = &hwpt_paging->common; in iommufd_device_auto_get_domain()
728 hwpt_paging->auto_domain = true; in iommufd_device_auto_get_domain()
729 *pt_id = hwpt->obj.id; in iommufd_device_auto_get_domain()
731 iommufd_object_finalize(idev->ictx, &hwpt->obj); in iommufd_device_auto_get_domain()
732 mutex_unlock(&ioas->mutex); in iommufd_device_auto_get_domain()
736 iommufd_object_abort_and_destroy(idev->ictx, &hwpt->obj); in iommufd_device_auto_get_domain()
738 mutex_unlock(&ioas->mutex); in iommufd_device_auto_get_domain()
748 pt_obj = iommufd_get_object(idev->ictx, *pt_id, IOMMUFD_OBJ_ANY); in iommufd_device_change_pt()
752 switch (pt_obj->type) { in iommufd_device_change_pt()
774 destroy_hwpt = ERR_PTR(-EINVAL); in iommufd_device_change_pt()
777 iommufd_put_object(idev->ictx, pt_obj); in iommufd_device_change_pt()
781 iommufd_hw_pagetable_put(idev->ictx, destroy_hwpt); in iommufd_device_change_pt()
785 iommufd_put_object(idev->ictx, pt_obj); in iommufd_device_change_pt()
790 * iommufd_device_attach - Connect a device to an iommu_domain
793 * Output the IOMMUFD_OBJ_HWPT_PAGING ID
810 * Pairs with iommufd_device_detach() - catches caller bugs attempting in iommufd_device_attach()
813 refcount_inc(&idev->obj.users); in iommufd_device_attach()
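
A hedged usage sketch: pt_id is in/out, so passing an IOAS ID lets iommufd auto-select or create a compatible HWPT_PAGING, and on return pt_id names the hw_pagetable actually attached. The signature is as in this version of the file; example_ names are hypothetical:

#include <linux/iommufd.h>

static int example_attach(struct iommufd_device *idev, u32 ioas_id)
{
        u32 pt_id = ioas_id;
        int rc;

        rc = iommufd_device_attach(idev, &pt_id);
        if (rc)
                return rc;

        /* pt_id now holds the IOMMUFD_OBJ_HWPT_PAGING object ID. */
        return 0;
}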
819 * iommufd_device_replace - Change the device's iommu_domain
822 * Output the IOMMUFD_OBJ_HWPT_PAGING ID
829 * If it fails then no change is made to the attachment. The iommu driver may
841 * iommufd_device_detach - Disconnect a device from an iommu_domain
852 iommufd_hw_pagetable_put(idev->ictx, hwpt); in iommufd_device_detach()
853 refcount_dec(&idev->obj.users); in iommufd_device_detach()
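
And the replace/detach half of the lifecycle, sketched under the same assumptions; on a failed replace the original attachment is left intact, per the comment above:

static int example_replace_then_detach(struct iommufd_device *idev,
                                       u32 new_pt_id)
{
        u32 pt_id = new_pt_id;
        int rc;

        /* Atomically switch domains; no unprotected window in between. */
        rc = iommufd_device_replace(idev, &pt_id);
        if (rc)
                return rc;

        iommufd_device_detach(idev);
        return 0;
}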
859 * a valid cur_ioas (access->ioas). A caller passing in a valid new_ioas should
865 u32 iopt_access_list_id = access->iopt_access_list_id; in iommufd_access_change_ioas()
866 struct iommufd_ioas *cur_ioas = access->ioas; in iommufd_access_change_ioas()
869 lockdep_assert_held(&access->ioas_lock); in iommufd_access_change_ioas()
872 if (cur_ioas != access->ioas_unpin) in iommufd_access_change_ioas()
873 return -EBUSY; in iommufd_access_change_ioas()
880 * iommufd_access_unpin_pages() can continue using access->ioas_unpin. in iommufd_access_change_ioas()
882 access->ioas = NULL; in iommufd_access_change_ioas()
885 rc = iopt_add_access(&new_ioas->iopt, access); in iommufd_access_change_ioas()
887 access->ioas = cur_ioas; in iommufd_access_change_ioas()
890 refcount_inc(&new_ioas->obj.users); in iommufd_access_change_ioas()
894 if (access->ops->unmap) { in iommufd_access_change_ioas()
895 mutex_unlock(&access->ioas_lock); in iommufd_access_change_ioas()
896 access->ops->unmap(access->data, 0, ULONG_MAX); in iommufd_access_change_ioas()
897 mutex_lock(&access->ioas_lock); in iommufd_access_change_ioas()
899 iopt_remove_access(&cur_ioas->iopt, access, iopt_access_list_id); in iommufd_access_change_ioas()
900 refcount_dec(&cur_ioas->obj.users); in iommufd_access_change_ioas()
903 access->ioas = new_ioas; in iommufd_access_change_ioas()
904 access->ioas_unpin = new_ioas; in iommufd_access_change_ioas()
909 static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id) in iommufd_access_change_ioas_id() argument
911 struct iommufd_ioas *ioas = iommufd_get_ioas(access->ictx, id); in iommufd_access_change_ioas_id()
917 iommufd_put_object(access->ictx, &ioas->obj); in iommufd_access_change_ioas_id()
926 mutex_lock(&access->ioas_lock); in iommufd_access_destroy_object()
927 if (access->ioas) in iommufd_access_destroy_object()
929 mutex_unlock(&access->ioas_lock); in iommufd_access_destroy_object()
930 iommufd_ctx_put(access->ictx); in iommufd_access_destroy_object()
934 * iommufd_access_create - Create an iommufd_access
938 * @id: Output ID number to return to userspace for this access
948 const struct iommufd_access_ops *ops, void *data, u32 *id) in iommufd_access_create() argument
960 access->data = data; in iommufd_access_create()
961 access->ops = ops; in iommufd_access_create()
963 if (ops->needs_pin_pages) in iommufd_access_create()
964 access->iova_alignment = PAGE_SIZE; in iommufd_access_create()
966 access->iova_alignment = 1; in iommufd_access_create()
969 refcount_inc(&access->obj.users); in iommufd_access_create()
970 access->ictx = ictx; in iommufd_access_create()
972 iommufd_object_finalize(ictx, &access->obj); in iommufd_access_create()
973 *id = access->obj.id; in iommufd_access_create()
974 mutex_init(&access->ioas_lock); in iommufd_access_create()
980 * iommufd_access_destroy - Destroy an iommufd_access
987 iommufd_object_destroy_user(access->ictx, &access->obj); in iommufd_access_destroy()
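
A hedged sketch of an emulated-DMA consumer setting up an access (the pattern mdev-style drivers use); the example_ names are hypothetical, and a fuller unmap callback is sketched after iommufd_access_notify_unmap() below:

#include <linux/iommufd.h>

static void example_unmap(void *data, unsigned long iova,
                          unsigned long length)
{
        /* Must stop using [iova, iova + length - 1]; fuller sketch below. */
}

static const struct iommufd_access_ops example_access_ops = {
        .needs_pin_pages = 1,   /* forces PAGE_SIZE iova_alignment */
        .unmap = example_unmap,
};

static struct iommufd_access *example_access_create(struct iommufd_ctx *ictx,
                                                    void *priv, u32 *id)
{
        /* Pair with iommufd_access_destroy() on teardown. */
        return iommufd_access_create(ictx, &example_access_ops, priv, id);
}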
993 mutex_lock(&access->ioas_lock); in iommufd_access_detach()
994 if (WARN_ON(!access->ioas)) { in iommufd_access_detach()
995 mutex_unlock(&access->ioas_lock); in iommufd_access_detach()
999 mutex_unlock(&access->ioas_lock); in iommufd_access_detach()
1007 mutex_lock(&access->ioas_lock); in iommufd_access_attach()
1008 if (WARN_ON(access->ioas)) { in iommufd_access_attach()
1009 mutex_unlock(&access->ioas_lock); in iommufd_access_attach()
1010 return -EINVAL; in iommufd_access_attach()
1014 mutex_unlock(&access->ioas_lock); in iommufd_access_attach()
1023 mutex_lock(&access->ioas_lock); in iommufd_access_replace()
1024 if (!access->ioas) { in iommufd_access_replace()
1025 mutex_unlock(&access->ioas_lock); in iommufd_access_replace()
1026 return -ENOENT; in iommufd_access_replace()
1029 mutex_unlock(&access->ioas_lock); in iommufd_access_replace()
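
Usage sketch for the three entry points above, with the same hypothetical naming; note that replace returns -ENOENT if the access was never attached:

static int example_access_lifecycle(struct iommufd_access *access,
                                    u32 ioas_id, u32 new_ioas_id)
{
        int rc;

        rc = iommufd_access_attach(access, ioas_id);
        if (rc)
                return rc;

        rc = iommufd_access_replace(access, new_ioas_id);
        if (rc)
                return rc;

        iommufd_access_detach(access);
        return 0;
}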
1035 * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
1058 xa_lock(&ioas->iopt.access_list); in iommufd_access_notify_unmap()
1059 xa_for_each(&ioas->iopt.access_list, index, access) { in iommufd_access_notify_unmap()
1060 if (!iommufd_lock_obj(&access->obj)) in iommufd_access_notify_unmap()
1062 xa_unlock(&ioas->iopt.access_list); in iommufd_access_notify_unmap()
1064 access->ops->unmap(access->data, iova, length); in iommufd_access_notify_unmap()
1066 iommufd_put_object(access->ictx, &access->obj); in iommufd_access_notify_unmap()
1067 xa_lock(&ioas->iopt.access_list); in iommufd_access_notify_unmap()
1069 xa_unlock(&ioas->iopt.access_list); in iommufd_access_notify_unmap()
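
The ops->unmap contract exercised here: the callback must drop any pins overlapping the range before returning, which is what lets the unmap proceed. A fuller version of the earlier stub, tracking a single pinned range in hypothetical driver state (the in-tree selftest's unmap callback unpins the same way):

#include <linux/iommufd.h>
#include <linux/mutex.h>

struct example_state {
        struct mutex lock;
        struct iommufd_access *access;
        unsigned long pinned_iova;
        unsigned long pinned_length;
};

static void example_unmap_full(void *data, unsigned long iova,
                               unsigned long length)
{
        struct example_state *st = data;
        unsigned long last = iova + length - 1;

        mutex_lock(&st->lock);
        if (st->pinned_length && st->pinned_iova <= last &&
            iova <= st->pinned_iova + st->pinned_length - 1) {
                /* Safe: notify_unmap calls us without holding ioas_lock. */
                iommufd_access_unpin_pages(st->access, st->pinned_iova,
                                           st->pinned_length);
                st->pinned_length = 0;
        }
        mutex_unlock(&st->lock);
}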
1073 * iommufd_access_unpin_pages() - Undo iommufd_access_pin_pages
1090 WARN_ON(check_add_overflow(iova, length - 1, &last_iova))) in iommufd_access_unpin_pages()
1093 mutex_lock(&access->ioas_lock); in iommufd_access_unpin_pages()
1098 if (WARN_ON(!access->ioas_unpin)) { in iommufd_access_unpin_pages()
1099 mutex_unlock(&access->ioas_lock); in iommufd_access_unpin_pages()
1102 iopt = &access->ioas_unpin->iopt; in iommufd_access_unpin_pages()
1104 down_read(&iopt->iova_rwsem); in iommufd_access_unpin_pages()
1112 up_read(&iopt->iova_rwsem); in iommufd_access_unpin_pages()
1113 mutex_unlock(&access->ioas_lock); in iommufd_access_unpin_pages()
1119 if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE) in iopt_area_contig_is_aligned()
1123 (iopt_area_start_byte(iter->area, iopt_area_last_iova(iter->area)) % in iopt_area_contig_is_aligned()
1124 PAGE_SIZE) != (PAGE_SIZE - 1)) in iopt_area_contig_is_aligned()
1132 return area->iommu_prot & IOMMU_WRITE; in check_area_prot()
1133 return area->iommu_prot & IOMMU_READ; in check_area_prot()
1137 * iommufd_access_pin_pages() - Return a list of pages under the iova
1167 WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap)) in iommufd_access_pin_pages()
1168 return -EINVAL; in iommufd_access_pin_pages()
1171 return -EINVAL; in iommufd_access_pin_pages()
1172 if (check_add_overflow(iova, length - 1, &last_iova)) in iommufd_access_pin_pages()
1173 return -EOVERFLOW; in iommufd_access_pin_pages()
1175 mutex_lock(&access->ioas_lock); in iommufd_access_pin_pages()
1176 if (!access->ioas) { in iommufd_access_pin_pages()
1177 mutex_unlock(&access->ioas_lock); in iommufd_access_pin_pages()
1178 return -ENOENT; in iommufd_access_pin_pages()
1180 iopt = &access->ioas->iopt; in iommufd_access_pin_pages()
1182 down_read(&iopt->iova_rwsem); in iommufd_access_pin_pages()
1189 if (area->prevent_access || in iommufd_access_pin_pages()
1191 rc = -EINVAL; in iommufd_access_pin_pages()
1196 rc = -EPERM; in iommufd_access_pin_pages()
1204 out_pages += last_index - index + 1; in iommufd_access_pin_pages()
1207 rc = -ENOENT; in iommufd_access_pin_pages()
1211 up_read(&iopt->iova_rwsem); in iommufd_access_pin_pages()
1212 mutex_unlock(&access->ioas_lock); in iommufd_access_pin_pages()
1217 last_iova = iter.cur_iova - 1; in iommufd_access_pin_pages()
1226 up_read(&iopt->iova_rwsem); in iommufd_access_pin_pages()
1227 mutex_unlock(&access->ioas_lock); in iommufd_access_pin_pages()
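
A hedged pin/unpin round trip; iova is assumed PAGE_SIZE-aligned, since needs_pin_pages forces PAGE_SIZE alignment, and the one-element page array covers a single-page length:

static int example_pin_one(struct iommufd_access *access, unsigned long iova)
{
        struct page *pages[1];
        int rc;

        rc = iommufd_access_pin_pages(access, iova, PAGE_SIZE, pages,
                                      IOMMUFD_ACCESS_RW_WRITE);
        if (rc)
                return rc;

        /* pages[0] remains valid until the matching unpin or ops->unmap. */

        iommufd_access_unpin_pages(access, iova, PAGE_SIZE);
        return 0;
}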
1233 * iommufd_access_rw - Read or write data under the iova
1251 int rc = -EINVAL; in iommufd_access_rw()
1254 return -EINVAL; in iommufd_access_rw()
1255 if (check_add_overflow(iova, length - 1, &last_iova)) in iommufd_access_rw()
1256 return -EOVERFLOW; in iommufd_access_rw()
1258 mutex_lock(&access->ioas_lock); in iommufd_access_rw()
1259 if (!access->ioas) { in iommufd_access_rw()
1260 mutex_unlock(&access->ioas_lock); in iommufd_access_rw()
1261 return -ENOENT; in iommufd_access_rw()
1263 iopt = &access->ioas->iopt; in iommufd_access_rw()
1265 down_read(&iopt->iova_rwsem); in iommufd_access_rw()
1268 unsigned long bytes = (last - iter.cur_iova) + 1; in iommufd_access_rw()
1270 if (area->prevent_access) { in iommufd_access_rw()
1271 rc = -EINVAL; in iommufd_access_rw()
1276 rc = -EPERM; in iommufd_access_rw()
1281 area->pages, iopt_area_start_byte(area, iter.cur_iova), in iommufd_access_rw()
1288 rc = -ENOENT; in iommufd_access_rw()
1290 up_read(&iopt->iova_rwsem); in iommufd_access_rw()
1291 mutex_unlock(&access->ioas_lock); in iommufd_access_rw()
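
And the copy-based alternative for short transfers, which avoids pinning entirely (example_read() is a hypothetical wrapper):

static int example_read(struct iommufd_access *access, unsigned long iova,
                        void *buf, size_t len)
{
        /* flags = 0 reads; IOMMUFD_ACCESS_RW_WRITE writes buf to the IOVA. */
        return iommufd_access_rw(access, iova, buf, len, 0);
}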
1298 struct iommu_hw_info *cmd = ucmd->cmd; in iommufd_get_hw_info()
1299 void __user *user_ptr = u64_to_user_ptr(cmd->data_uptr); in iommufd_get_hw_info()
1307 if (cmd->flags || cmd->__reserved) in iommufd_get_hw_info()
1308 return -EOPNOTSUPP; in iommufd_get_hw_info()
1310 idev = iommufd_get_device(ucmd, cmd->dev_id); in iommufd_get_hw_info()
1314 ops = dev_iommu_ops(idev->dev); in iommufd_get_hw_info()
1315 if (ops->hw_info) { in iommufd_get_hw_info()
1316 data = ops->hw_info(idev->dev, &data_len, &cmd->out_data_type); in iommufd_get_hw_info()
1326 if (WARN_ON_ONCE(cmd->out_data_type == in iommufd_get_hw_info()
1328 rc = -ENODEV; in iommufd_get_hw_info()
1332 cmd->out_data_type = IOMMU_HW_INFO_TYPE_NONE; in iommufd_get_hw_info()
1337 copy_len = min(cmd->data_len, data_len); in iommufd_get_hw_info()
1339 rc = -EFAULT; in iommufd_get_hw_info()
1347 if (copy_len < cmd->data_len) { in iommufd_get_hw_info()
1348 if (clear_user(user_ptr + copy_len, cmd->data_len - copy_len)) { in iommufd_get_hw_info()
1349 rc = -EFAULT; in iommufd_get_hw_info()
1358 cmd->data_len = data_len; in iommufd_get_hw_info()
1360 cmd->out_capabilities = 0; in iommufd_get_hw_info()
1361 if (device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING)) in iommufd_get_hw_info()
1362 cmd->out_capabilities |= IOMMU_HW_CAP_DIRTY_TRACKING; in iommufd_get_hw_info()
1368 iommufd_put_object(ucmd->ictx, &idev->obj); in iommufd_get_hw_info()
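
Finally, a hedged userspace sketch of driving IOMMU_GET_HW_INFO against /dev/iommu; the struct layout is assumed to follow include/uapi/linux/iommufd.h, and the iommufd fd and dev_id are assumed to come from a prior binding (e.g. the VFIO cdev path):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int example_get_hw_info(int iommufd, uint32_t dev_id,
                               void *data, uint32_t data_len)
{
        struct iommu_hw_info cmd = {
                .size = sizeof(cmd),
                .dev_id = dev_id,
                .data_len = data_len,
                .data_uptr = (uintptr_t)data,
        };

        if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
                return -1;

        /*
         * cmd.out_data_type identifies the vendor format (or
         * IOMMU_HW_INFO_TYPE_NONE); cmd.data_len reports the kernel's full
         * length, which may exceed what was copied; cmd.out_capabilities
         * carries IOMMU_HW_CAP_* bits such as IOMMU_HW_CAP_DIRTY_TRACKING.
         */
        return 0;
}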