Lines Matching +full:half +full:- +full:dbm

1 // SPDX-License-Identifier: MIT
55 addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id); in guc_bo_ggtt_addr()
60 xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr); in guc_bo_ggtt_addr()
67 u32 level = xe_guc_log_get_level(&guc->log); in guc_ctl_debug_flags()
83 if (!guc_to_xe(guc)->info.skip_guc_pc) in guc_ctl_feature_flags()
91 u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT; in guc_ctl_log_params_flags()
117 BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) > in guc_ctl_log_params_flags()
119 BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) > in guc_ctl_log_params_flags()
121 BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) > in guc_ctl_log_params_flags()
128 ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) | in guc_ctl_log_params_flags()
129 ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) | in guc_ctl_log_params_flags()
130 ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) << in guc_ctl_log_params_flags()
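The guc_ctl_log_params_flags() fragments above pack each log-section size as (size / unit - 1) into its bit field, and the BUILD_BUG_ON() checks guarantee the encoded value fits the field width. As a worked example under assumed numbers (the actual CRASH/DEBUG/CAPTURE sizes and the LOG_UNIT/CAPTURE_UNIT selection are defined elsewhere and are not part of this listing): an 8 KiB crash section with a 4 KiB LOG_UNIT would encode as 8K / 4K - 1 = 1 in the GUC_LOG_CRASH field.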
144 u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT; in guc_ctl_ads_flags()
166 * can cause complications if the non-stalled submission runs for a long in needs_wa_dual_queue()
194 * Wa_22011802037: FIXME - there's more to be done than simply setting in guc_ctl_wa_flags()
221 return (((u32)xe->info.devid) << 16) | xe->info.revid; in guc_ctl_devid()
227 u32 *params = guc->params; in guc_print_params()
230 BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32)); in guc_print_params()
239 u32 *params = guc->params; in guc_init_params()
253 u32 *params = guc->params; in guc_init_params_post_hwconfig()
277 xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0); in guc_write_params()
280 xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]); in guc_write_params()
290 FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) | in guc_action_register_g2g_buffer()
301 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in guc_action_register_g2g_buffer()
317 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in guc_action_deregister_g2g_buffer()
320 #define G2G_DEV(gt) (((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1)
327 * Generate a unique id for each bi-directional CTB for each pair of
329 * a single allocation that is sub-divided into multiple CTBs.
335 * N 0.0 --/-- 00/01 02/03 04/05
336 * e 0.1 01/00 --/-- 06/07 08/09
337 * a 1.0 03/02 07/06 --/-- 10/11
338 * r 1.1 05/04 09/08 11/10 --/--
360 return -1; in g2g_slot()
363 /* Top right table half */ in g2g_slot()
370 /* Bottom left table half */ in g2g_slot()
375 direction = (1 - type); in g2g_slot()
379 for (i = y; i > 0; i--) in g2g_slot()
380 idx += max_inst - i; in g2g_slot()
383 idx += (x - 1 - y); in g2g_slot()
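The matrix comment and the g2g_slot() fragments above treat each near/far GT pair as one cell of a triangular table, with two slots per cell (one per direction). Below is a minimal, standalone sketch (not the kernel function itself) that reproduces the numbering from the example table, assuming GT handles are formed as (tile << 1) | device when a tile has more than one GT; the function and variable names here are illustrative only.

#include <stdio.h>

/* Illustrative re-derivation of the slot numbering shown in the table above. */
static int g2g_slot_example(unsigned int near, unsigned int far,
                            unsigned int type, unsigned int max_inst)
{
        unsigned int x, y, direction, idx = 0, i;

        if (near == far)
                return -1;              /* no channel to one's self */

        if (far > near) {               /* top right table half */
                x = far;
                y = near;
                direction = type;       /* 'forwards' direction */
        } else {                        /* bottom left table half */
                x = near;
                y = far;
                direction = 1 - type;   /* 'backwards' direction */
        }

        for (i = y; i > 0; i--)         /* full rows above the target row */
                idx += max_inst - i;
        idx += x - 1 - y;               /* cells in the target row before x */

        return idx * 2 + direction;     /* two slots (directions) per cell */
}

int main(void)
{
        /* Near 0.1 -> Far 1.0, max_inst = 4: prints 6, matching the 06/07 cell. */
        printf("%d\n", g2g_slot_example((0 << 1) | 1, (1 << 1) | 0, 0, 4));
        return 0;
}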
399 u32 near_tile = gt_to_tile(near_gt)->id; in guc_g2g_register()
401 u32 far_tile = gt_to_tile(far_gt)->id; in guc_g2g_register()
403 u32 max = xe->info.gt_count; in guc_g2g_register()
410 g2g_bo = near_guc->g2g.bo; in guc_g2g_register()
420 xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE); in guc_g2g_register()
421 xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size); in guc_g2g_register()
436 unsigned int count = xe->info.gt_count; in guc_g2g_size()
437 u32 num_channels = (count * (count - 1)) / 2; in guc_g2g_size()
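For sizing (guc_g2g_size() above), the (count * (count - 1)) / 2 term is the number of unordered GT pairs: with 4 GTs that is (4 * 3) / 2 = 6 bi-directional channels, i.e. 12 CTBs plus their descriptors, which is consistent with the highest slot number (11) in the example table.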
447 if (xe->info.gt_count <= 1) in xe_guc_g2g_wanted()
462 if (guc->g2g.bo) in guc_g2g_alloc()
465 if (gt->info.id != 0) { in guc_g2g_alloc()
467 struct xe_guc *root_guc = &root_gt->uc.guc; in guc_g2g_alloc()
470 bo = xe_bo_get(root_guc->g2g.bo); in guc_g2g_alloc()
472 return -ENODEV; in guc_g2g_alloc()
474 guc->g2g.bo = bo; in guc_g2g_alloc()
475 guc->g2g.owned = false; in guc_g2g_alloc()
488 xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size); in guc_g2g_alloc()
489 guc->g2g.bo = bo; in guc_g2g_alloc()
490 guc->g2g.owned = true; in guc_g2g_alloc()
497 if (!guc->g2g.bo) in guc_g2g_fini()
501 if (!guc->g2g.owned) in guc_g2g_fini()
502 xe_bo_put(guc->g2g.bo); in guc_g2g_fini()
504 guc->g2g.bo = NULL; in guc_g2g_fini()
515 if (!guc->g2g.bo) { in guc_g2g_start()
524 xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA)); in guc_g2g_start()
527 have_dev = xe->info.gt_count > xe->info.tile_count; in guc_g2g_start()
532 if (far_gt->info.id == gt->info.id) in guc_g2g_start()
535 far_tile = gt_to_tile(far_gt)->id; in guc_g2g_start()
541 while (--t >= 0) in guc_g2g_start()
554 if (far_gt->info.id == gt->info.id) in guc_g2g_start()
560 tile = gt_to_tile(far_gt)->id; in guc_g2g_start()
577 xe_uc_fini_hw(&guc_to_gt(guc)->uc); in guc_fini_hw()
584 * xe_guc_comm_init_early - early initialization of GuC communication
594 guc->notify_reg = MED_GUC_HOST_INTERRUPT; in xe_guc_comm_init_early()
596 guc->notify_reg = GUC_HOST_INTERRUPT; in xe_guc_comm_init_early()
608 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo); in xe_guc_realloc_post_hwconfig()
612 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo); in xe_guc_realloc_post_hwconfig()
616 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo); in xe_guc_realloc_post_hwconfig()
620 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo); in xe_guc_realloc_post_hwconfig()
633 err = xe_guc_ct_init(&guc->ct); in vf_guc_init()
637 err = xe_guc_relay_init(&guc->relay); in vf_guc_init()
650 guc->fw.type = XE_UC_FW_TYPE_GUC; in xe_guc_init()
651 ret = xe_uc_fw_init(&guc->fw); in xe_guc_init()
655 if (!xe_uc_fw_is_enabled(&guc->fw)) in xe_guc_init()
665 ret = xe_guc_log_init(&guc->log); in xe_guc_init()
673 ret = xe_guc_ads_init(&guc->ads); in xe_guc_init()
677 ret = xe_guc_ct_init(&guc->ct); in xe_guc_init()
681 ret = xe_guc_relay_init(&guc->relay); in xe_guc_init()
685 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE); in xe_guc_init()
687 ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc); in xe_guc_init()
716 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
738 ret = xe_guc_db_mgr_init(&guc->dbm, ~0); in xe_guc_init_post_hwconfig()
742 ret = xe_guc_pc_init(&guc->pc); in xe_guc_init_post_hwconfig()
746 return xe_guc_ads_init_post_hwconfig(&guc->ads); in xe_guc_init_post_hwconfig()
753 xe_guc_ads_populate_post_load(&guc->ads); in xe_guc_post_load_init()
761 guc->submission_state.enabled = true; in xe_guc_post_load_init()
769 struct xe_mmio *mmio = &gt->mmio; in xe_guc_reset()
790 ret = -EIO; in xe_guc_reset()
804 struct xe_mmio *mmio = &gt->mmio; in guc_prepare_xfer()
815 if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) in guc_prepare_xfer()
816 shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index); in guc_prepare_xfer()
837 if (guc->fw.rsa_size > 256) { in guc_xfer_rsa()
838 u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) + in guc_xfer_rsa()
839 xe_uc_fw_rsa_offset(&guc->fw); in guc_xfer_rsa()
840 xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr); in guc_xfer_rsa()
844 copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa)); in guc_xfer_rsa()
846 return -ENOMEM; in guc_xfer_rsa()
849 xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]); in guc_xfer_rsa()
858 * successful completion, -1 for failure and 0 for any intermediate state.
878 return -1; in guc_load_done()
892 return -1; in guc_load_done()
939 struct xe_mmio *mmio = &gt->mmio; in guc_wait_ucode()
940 struct xe_guc_pc *guc_pc = &gt->uc.guc.pc; in guc_wait_ucode()
1050 xe_guc_pc_raise_unslice(&guc->pc); in __xe_guc_upload()
1059 * register (if key size <= 256) or through a ggtt-pinned vma (if key in __xe_guc_upload()
1061 * HW is fixed for each platform and hard-coded in the bootrom. in __xe_guc_upload()
1070 ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE); in __xe_guc_upload()
1077 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING); in __xe_guc_upload()
1081 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL); in __xe_guc_upload()
1118 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
1135 xe_guc_ads_populate_minimal(&guc->ads); in xe_guc_min_load_for_hwconfig()
1137 xe_guc_pc_init_early(&guc->pc); in xe_guc_min_load_for_hwconfig()
1156 xe_guc_ads_populate(&guc->ads); in xe_guc_upload()
1171 msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15)); in guc_handle_mmio_msg()
1174 xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0); in guc_handle_mmio_msg()
1191 xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE, in guc_enable_irq()
1198 xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0); in guc_enable_irq()
1210 err = xe_memirq_init_guc(&tile->memirq, guc); in xe_guc_enable_communication()
1217 err = xe_guc_ct_enable(&guc->ct); in xe_guc_enable_communication()
1254 xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data); in xe_guc_notify()
1264 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in xe_guc_auth_huc()
1272 struct xe_mmio *mmio = &gt->mmio; in xe_guc_mmio_send_recv()
1276 const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1; in xe_guc_mmio_send_recv()
1291 /* Not in critical data-path, just do if else for GT type */ in xe_guc_mmio_send_recv()
1362 return -ENXIO; in xe_guc_mmio_send_recv()
1370 return -EPROTO; in xe_guc_mmio_send_recv()
1418 return -EPROTO; in guc_self_cfg()
1420 return -ENOKEY; in guc_self_cfg()
1446 xe_guc_ct_irq_handler(&guc->ct); in xe_guc_irq_handler()
1454 xe_uc_fw_sanitize(&guc->fw); in xe_guc_sanitize()
1455 xe_guc_ct_disable(&guc->ct); in xe_guc_sanitize()
1456 guc->submission_state.enabled = false; in xe_guc_sanitize()
1474 err = xe_guc_pc_stop(&guc->pc); in xe_guc_stop_prepare()
1482 xe_guc_ct_stop(&guc->ct); in xe_guc_stop()
1492 err = xe_guc_pc_start(&guc->pc); in xe_guc_start()
1507 xe_uc_fw_print(&guc->fw, p); in xe_guc_print_info()
1513 status = xe_mmio_read32(&gt->mmio, GUC_STATUS); in xe_guc_print_info()
1523 xe_guc_log_get_level(&guc->log)); in xe_guc_print_info()
1528 i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i))); in xe_guc_print_info()
1534 xe_guc_ct_print(&guc->ct, p, false); in xe_guc_print_info()
1541 * xe_guc_declare_wedged() - Declare GuC wedged
1549 xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); in xe_guc_declare_wedged()
1552 xe_guc_ct_stop(&guc->ct); in xe_guc_declare_wedged()