Lines Matching +full:0 +full:xe
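
All of these matches fall in the Xe DRM driver's device-query path: the function names (query_engines(), query_config(), xe_query_ioctl(), and so on) correspond to drivers/gpu/drm/xe/xe_query.c in the mainline kernel, the file that implements DRM_IOCTL_XE_DEVICE_QUERY.
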
46 static size_t calc_hw_engine_info_size(struct xe_device *xe) in calc_hw_engine_info_size() argument
52 int i = 0; in calc_hw_engine_info_size()
54 for_each_gt(gt, xe, gt_id) in calc_hw_engine_info_size()
93 u32 upper, lower, old_upper, loop = 0; in hwe_read_timestamp()
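
The upper/lower/old_upper locals above are the usual split 64-bit counter read: sample the upper half, read the lower half, then re-read the upper half and retry if it moved, so the two 32-bit reads are consistent across a carry. A minimal sketch of the pattern, with placeholder read callbacks standing in for the driver's engine MMIO helpers:

	static u64 read_counter64(u32 (*rd_lo)(void), u32 (*rd_hi)(void))
	{
		u32 upper, lower, old_upper, loop = 0;

		upper = rd_hi();
		do {
			old_upper = upper;
			lower = rd_lo();
			upper = rd_hi();
		} while (upper != old_upper && loop++ < 2);

		return (u64)upper << 32 | lower;
	}
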
111 query_engine_cycles(struct xe_device *xe, in query_engine_cycles() argument
123 if (query->size == 0) { in query_engine_cycles()
125 return 0; in query_engine_cycles()
126 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_engine_cycles()
142 gt = xe_device_get_gt(xe, eci->gt_id); in query_engine_cycles()
165 if (GRAPHICS_VER(xe) >= 20) in query_engine_cycles()
177 return 0; in query_engine_cycles()
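
The query->size == 0 / query->size != size pair visible here repeats in every handler in this file: a first ioctl call with size set to 0 only reports the required buffer size, and the second call must pass back exactly that size plus a buffer pointer. A userspace sketch of the two-call protocol (error paths trimmed; an open xe DRM fd is assumed, and note that a few queries, such as engine cycles, also expect input fields pre-filled in the buffer):

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <drm/xe_drm.h>

	static void *xe_query_alloc(int fd, uint32_t query_id, uint32_t *size)
	{
		struct drm_xe_device_query q = { .query = query_id };
		void *buf;

		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))	/* pass 1: get size */
			return NULL;

		buf = calloc(1, q.size);
		if (!buf)
			return NULL;

		q.data = (uintptr_t)buf;			/* pass 2: exact size */
		if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q)) {
			free(buf);
			return NULL;
		}

		*size = q.size;
		return buf;
	}
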
180 static int query_engines(struct xe_device *xe, in query_engines() argument
183 size_t size = calc_hw_engine_info_size(xe); in query_engines()
191 int i = 0; in query_engines()
193 if (query->size == 0) { in query_engines()
195 return 0; in query_engines()
196 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_engines()
204 for_each_gt(gt, xe, gt_id) in query_engines()
226 return 0; in query_engines()
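
query_engines() sizes its reply with calc_hw_engine_info_size(), one entry per hardware engine across all GTs, then fills a struct drm_xe_query_engines. Consuming it is a flat array walk; a fragment reusing the hypothetical xe_query_alloc() helper above:

	uint32_t size;
	struct drm_xe_query_engines *engines =
		xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_ENGINES, &size);

	for (uint32_t i = 0; engines && i < engines->num_engines; i++) {
		struct drm_xe_engine_class_instance *eci =
			&engines->engines[i].instance;

		printf("gt%u: class %u, instance %u\n",
		       eci->gt_id, eci->engine_class, eci->engine_instance);
	}
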
229 static size_t calc_mem_regions_size(struct xe_device *xe) in calc_mem_regions_size() argument
235 if (ttm_manager_type(&xe->ttm, i)) in calc_mem_regions_size()
241 static int query_mem_regions(struct xe_device *xe, in query_mem_regions() argument
244 size_t size = calc_mem_regions_size(xe); in query_mem_regions()
251 if (query->size == 0) { in query_mem_regions()
253 return 0; in query_mem_regions()
254 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_mem_regions()
259 if (XE_IOCTL_DBG(xe, !mem_regions)) in query_mem_regions()
262 man = ttm_manager_type(&xe->ttm, XE_PL_TT); in query_mem_regions()
263 mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM; in query_mem_regions()
269 mem_regions->mem_regions[0].instance = 0; in query_mem_regions()
270 mem_regions->mem_regions[0].min_page_size = PAGE_SIZE; in query_mem_regions()
271 mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT; in query_mem_regions()
273 mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man); in query_mem_regions()
277 man = ttm_manager_type(&xe->ttm, i); in query_mem_regions()
284 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? in query_mem_regions()
304 ret = 0; in query_mem_regions()
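
query_mem_regions() always reports system memory first (class SYSMEM, instance 0, min_page_size of PAGE_SIZE), then appends one region per populated VRAM manager; the XE_VRAM_FLAGS_NEED64K test above is what raises a VRAM region's minimum page size from SZ_4K to SZ_64K. A userspace walk over the reply, again a sketch on top of the hypothetical helper:

	uint32_t size;
	struct drm_xe_query_mem_regions *mr =
		xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_MEM_REGIONS, &size);

	for (uint32_t i = 0; mr && i < mr->num_mem_regions; i++) {
		struct drm_xe_mem_region *r = &mr->mem_regions[i];

		printf("%s[%u]: %llu of %llu bytes used, min page %u\n",
		       r->mem_class == DRM_XE_MEM_REGION_CLASS_VRAM ?
				"vram" : "sysmem",
		       r->instance,
		       (unsigned long long)r->used,
		       (unsigned long long)r->total_size,
		       r->min_page_size);
	}
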
312 static int query_config(struct xe_device *xe, struct drm_xe_device_query *query) in query_config() argument
321 if (query->size == 0) { in query_config()
323 return 0; in query_config()
324 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_config()
334 xe->info.devid | (xe->info.revid << 16); in query_config()
335 if (xe_device_get_root_tile(xe)->mem.vram.usable_size) in query_config()
339 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K; in query_config()
340 config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits; in query_config()
342 xe_exec_queue_device_get_max_priority(xe); in query_config()
350 return 0; in query_config()
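
Per the devid | (revid << 16) match above, the REV_AND_DEVICE_ID config entry packs the PCI device id into the low 16 bits and the revision above it, and the usable_size test decides whether the has-VRAM flag is reported. Decoding the packed id in userspace (fragment, same hypothetical helper):

	uint32_t size;
	struct drm_xe_query_config *cfg =
		xe_query_alloc(fd, DRM_XE_DEVICE_QUERY_CONFIG, &size);

	if (cfg) {
		uint64_t id = cfg->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID];

		printf("devid 0x%04llx rev 0x%02llx, %llu VA bits, min align %llu\n",
		       (unsigned long long)(id & 0xffff),
		       (unsigned long long)(id >> 16),
		       (unsigned long long)cfg->info[DRM_XE_QUERY_CONFIG_VA_BITS],
		       (unsigned long long)cfg->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT]);
	}
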
353 static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query) in query_gt_list() argument
357 xe->info.gt_count * sizeof(struct drm_xe_gt); in query_gt_list()
363 if (query->size == 0) { in query_gt_list()
365 return 0; in query_gt_list()
366 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_gt_list()
374 gt_list->num_gt = xe->info.gt_count; in query_gt_list()
376 for_each_gt(gt, xe, id) { in query_gt_list()
391 * Bit 0 -> System Memory in query_gt_list()
398 if (!IS_DGFX(xe)) in query_gt_list()
399 gt_list->gt_list[id].near_mem_regions = 0x1; in query_gt_list()
403 gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^ in query_gt_list()
420 return 0; in query_gt_list()
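
The near/far values here are plain region bitmasks, matching the "Bit 0 -> System Memory" comment: bit 0 is sysmem and each VRAM instance owns a higher bit. On integrated parts near_mem_regions is therefore hardwired to 0x1, and in either case far_mem_regions falls out as every other populated region, i.e. xe->info.mem_region_mask XOR the near mask.
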
423 static int query_hwconfig(struct xe_device *xe, in query_hwconfig() argument
426 struct xe_gt *gt = xe_root_mmio_gt(xe); in query_hwconfig()
431 if (query->size == 0) { in query_hwconfig()
433 return 0; in query_hwconfig()
434 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_hwconfig()
450 return 0; in query_hwconfig()
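
query_hwconfig() hands the GuC "hwconfig" blob to userspace unmodified. Assuming the same key/length/value dword layout that i915 documents for this table (an assumption, not something visible in these matches), a parse over the blob buf of size bytes from the two-call query might look like:

	/* Assumed record layout: [key, len-in-dwords, value dwords...] */
	uint32_t *p = buf, *end = p + size / sizeof(uint32_t);

	while (p + 2 <= end) {
		uint32_t key = p[0], len = p[1];

		handle_attribute(key, &p[2], len);	/* hypothetical consumer */
		p += 2 + len;
	}
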
453 static size_t calc_topo_query_size(struct xe_device *xe) in calc_topo_query_size() argument
456 size_t query_size = 0; in calc_topo_query_size()
459 for_each_gt(gt, xe, id) { in calc_topo_query_size()
488 return 0; in copy_mask()
491 static int query_gt_topology(struct xe_device *xe, in query_gt_topology() argument
495 size_t size = calc_topo_query_size(xe); in query_gt_topology()
500 if (query->size == 0) { in query_gt_topology()
502 return 0; in query_gt_topology()
503 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_gt_topology()
507 for_each_gt(gt, xe, id) { in query_gt_topology()
547 return 0; in query_gt_topology()
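
The topology reply is not a fixed array but a packed stream: each struct drm_xe_query_topology_mask header (gt_id, type, num_bytes) is immediately followed by num_bytes of bitmask, with copy_mask() emitting the records back to back for every GT. Userspace walks it with the same arithmetic (sketch):

	uint8_t *p = buf, *end = p + size;

	while (p < end) {
		struct drm_xe_query_topology_mask *topo = (void *)p;

		/* topo->type is e.g. DRM_XE_TOPO_DSS_GEOMETRY or
		 * DRM_XE_TOPO_EU_PER_DSS; mask bytes are LSB first. */
		p += sizeof(*topo) + topo->num_bytes;
	}
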
551 query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query) in query_uc_fw_version() argument
558 if (query->size == 0) { in query_uc_fw_version()
560 return 0; in query_uc_fw_version()
561 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_uc_fw_version()
568 if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved)) in query_uc_fw_version()
573 struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc; in query_uc_fw_version()
582 if (MEDIA_VER(xe) >= 13) { in query_uc_fw_version()
586 for_each_tile(tile, xe, gt_id) { in query_uc_fw_version()
593 media_gt = xe->tiles[0].primary_gt; in query_uc_fw_version()
609 resp.branch_ver = 0; in query_uc_fw_version()
620 return 0; in query_uc_fw_version()
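
query_uc_fw_version() is the one handler that also reads input out of the user buffer: a uc_type selector chooses between the GuC (taken from tile 0's primary GT, per the match above) and the HuC, and the pad/pad2/reserved fields must be zero or the XE_IOCTL_DBG check rejects the call. For the HuC, MEDIA_VER(xe) >= 13 parts search the tile loop for a dedicated media GT, while older parts fall back to tile 0's primary GT.
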
623 static size_t calc_oa_unit_query_size(struct xe_device *xe) in calc_oa_unit_query_size() argument
629 for_each_gt(gt, xe, id) { in calc_oa_unit_query_size()
630 for (i = 0; i < gt->oa.num_oa_units; i++) { in calc_oa_unit_query_size()
640 static int query_oa_units(struct xe_device *xe, in query_oa_units() argument
644 size_t size = calc_oa_unit_query_size(xe); in query_oa_units()
654 if (query->size == 0) { in query_oa_units()
656 return 0; in query_oa_units()
657 } else if (XE_IOCTL_DBG(xe, query->size != size)) { in query_oa_units()
665 pdu = (u8 *)&qoa->oa_units[0]; in query_oa_units()
666 for_each_gt(gt, xe, gt_id) { in query_oa_units()
667 for (i = 0; i < gt->oa.num_oa_units; i++) { in query_oa_units()
678 j = 0; in query_oa_units()
690 pdu += sizeof(*du) + j * sizeof(du->eci[0]); in query_oa_units()
698 return ret ? -EFAULT : 0; in query_oa_units()
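
Like topology, the OA reply flattens variable-size records: pdu walks the output and, per the match above, advances by sizeof(*du) + j * sizeof(du->eci[0]) after each unit, so every drm_xe_oa_unit header is trailed by its own engine list. Userspace replays the same stride (sketch):

	struct drm_xe_query_oa_units *qoa = buf;
	uint8_t *pdu = (uint8_t *)&qoa->oa_units[0];

	for (uint32_t i = 0; i < qoa->num_oa_units; i++) {
		struct drm_xe_oa_unit *du = (struct drm_xe_oa_unit *)pdu;

		/* du->eci[0 .. du->num_engines - 1] follow the header */
		pdu += sizeof(*du) + du->num_engines * sizeof(du->eci[0]);
	}
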
701 static int (* const xe_query_funcs[])(struct xe_device *xe,
716 struct xe_device *xe = to_xe_device(dev); in xe_query_ioctl() local
720 if (XE_IOCTL_DBG(xe, query->extensions) || in xe_query_ioctl()
721 XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1])) in xe_query_ioctl()
724 if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs))) in xe_query_ioctl()
728 if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx])) in xe_query_ioctl()
731 return xe_query_funcs[idx](xe, query); in xe_query_ioctl()
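
The dispatcher is the uAPI hygiene layer: extensions and both reserved words must be zero, query->query is bounds-checked against the xe_query_funcs table, and NULL slots are rejected so holes in the table stay unreachable. The separate idx used for the call is, in the upstream file, the array_index_nospec()-sanitized copy of query->query, which keeps the user-controlled index from steering a speculative out-of-bounds table read.
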