/linux-6.14.4/drivers/gpu/drm/xe/ |
D | xe_gt.c |
     66  struct xe_gt *gt = arg;
     68  destroy_workqueue(gt->ordered_wq);
     73  struct xe_gt *gt;
     76  gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
     77  if (!gt)
     80  gt->tile = tile;
     81  gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq",
     84  err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
     88  return gt;
     91  void xe_gt_sanitize(struct xe_gt *gt)
    [all …]
|
D | xe_gt_sriov_pf_policy.c |
     37  static int pf_send_policy_klvs(struct xe_gt *gt, const u32 *klvs, u32 num_dwords)
     40  struct xe_tile *tile = gt_to_tile(gt);
     42  struct xe_guc *guc = &gt->uc.guc;
     67  static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
     72  xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
     74  ret = pf_send_policy_klvs(gt, klvs, num_dwords);
     78  struct drm_printer p = xe_gt_info_printer(gt);
     80  xe_gt_sriov_notice(gt, "Failed to push %u policy KLV%s (%pe)\n",
     89  static int pf_push_policy_u32(struct xe_gt *gt, u16 key, u32 value)
     96  return pf_push_policy_klvs(gt, 1, klv, ARRAY_SIZE(klv));
    [all …]
|
D | xe_gt_sriov_pf_control.c |
     56  static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
     60  xe_gt_assert(gt, vfid != PFID);
     61  xe_gt_sriov_dbg_verbose(gt, "sending VF%u control command %s\n",
     64  err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
     66  xe_gt_sriov_err(gt, "VF%u control command %s failed (%pe)\n",
     71  static int pf_send_vf_pause(struct xe_gt *gt, unsigned int vfid)
     73  return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_PAUSE);
     76  static int pf_send_vf_resume(struct xe_gt *gt, unsigned int vfid)
     78  return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_RESUME);
     81  static int pf_send_vf_stop(struct xe_gt *gt, unsigned int vfid)
    [all …]
|
D | xe_gt_sriov_pf_config.c |
     60  static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
     62  struct xe_guc *guc = &gt->uc.guc;
     74  static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
     77  struct xe_tile *tile = gt_to_tile(gt);
     79  struct xe_guc *guc = &gt->uc.guc;
    105  static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
    110  xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
    112  ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
    116  struct drm_printer p = xe_gt_info_printer(gt);
    119  xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
    [all …]
|
D | xe_gt_debugfs.c |
     58  * int foo(struct xe_gt *gt, struct drm_printer *p)
     60  *     drm_printf(p, "GT%u\n", gt->info.id);
     68  * dir = debugfs_create_dir("gt", parent);
     69  * dir->d_inode->i_private = gt;
     79  struct xe_gt *gt = parent->d_inode->i_private;
     85  return print(gt, &p);
     88  static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
     90  struct xe_device *xe = gt_to_xe(gt);
     96  fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
     99  xe_force_wake_put(gt_to_fw(gt), fw_ref);
    [all …]
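The DOC fragment above shows the intended pattern: the GT pointer is stashed in the debugfs directory inode's i_private and handed back to a simple printer through xe_gt_debugfs_simple_show(). A minimal sketch of such a printer, assuming only what the comment shows (the function name and the second printed field are illustrative, not taken from the file):

	/*
	 * Sketch of a per-GT debugfs printer usable with
	 * xe_gt_debugfs_simple_show(); the printed fields are illustrative.
	 */
	static int bar(struct xe_gt *gt, struct drm_printer *p)
	{
		drm_printf(p, "GT%u\n", gt->info.id);
		drm_printf(p, "\ttile: %u\n", gt->tile->id);

		return 0;
	}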
|
D | xe_gt_sriov_pf_migration.c |
     36  static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
     40  ret = guc_action_vf_save_restore(&gt->uc.guc, vfid, GUC_PF_OPCODE_VF_SAVE, 0, 0);
     45  static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
     49  struct xe_tile *tile = gt_to_tile(gt);
     51  struct xe_guc *guc = &gt->uc.guc;
     55  xe_gt_assert(gt, size % sizeof(u32) == 0);
     56  xe_gt_assert(gt, size == ndwords * sizeof(u32));
     81  static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
     85  struct xe_tile *tile = gt_to_tile(gt);
     87  struct xe_guc *guc = &gt->uc.guc;
    [all …]
|
D | xe_gt_tlb_invalidation.c |
     29  static long tlb_timeout_jiffies(struct xe_gt *gt)
     35  long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);
     42  if (WARN_ON_ONCE(!fence->gt))
     45  xe_pm_runtime_put(gt_to_xe(fence->gt));
     46  fence->gt = NULL; /* fini() should be called once */
     70  if (WARN_ON_ONCE(!fence->gt))
     73  __invalidation_fence_signal(gt_to_xe(fence->gt), fence);
     78  struct xe_gt *gt = container_of(work, struct xe_gt,
     80  struct xe_device *xe = gt_to_xe(gt);
     83  LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);
    [all …]
|
D | xe_gt_throttle.c |
     18  * DOC: Xe GT Throttle
     20  * Provides sysfs entries and other helpers for frequency throttle reasons in GT
     22  * device/gt#/freq0/throttle/status - Overall status
     23  * device/gt#/freq0/throttle/reason_pl1 - Frequency throttle due to PL1
     24  * device/gt#/freq0/throttle/reason_pl2 - Frequency throttle due to PL2
     25  * device/gt#/freq0/throttle/reason_pl4 - Frequency throttle due to PL4, Iccmax etc.
     26  * device/gt#/freq0/throttle/reason_thermal - Frequency throttle due to thermal
     27  * device/gt#/freq0/throttle/reason_prochot - Frequency throttle due to prochot
     28  * device/gt#/freq0/throttle/reason_ratl - Frequency throttle due to RATL
     29  * device/gt#/freq0/throttle/reason_vr_thermalert - Frequency throttle due to VR THERMALERT
    [all …]
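A quick way to inspect these from userspace is to read the attributes directly; a small sketch (the sysfs prefix below is an assumption — only the attribute names come from the list above):

	/* Userspace sketch: dump the throttle attributes listed above. */
	#include <stdio.h>

	int main(void)
	{
		/* Prefix is assumed; adjust the card/tile/gt indices for your system. */
		const char *base = "/sys/class/drm/card0/device/tile0/gt0/freq0/throttle";
		const char *attrs[] = { "status", "reason_pl1", "reason_pl2", "reason_pl4",
					"reason_thermal", "reason_prochot", "reason_ratl",
					"reason_vr_thermalert" };
		char path[256], buf[64];

		for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
			FILE *f;

			snprintf(path, sizeof(path), "%s/%s", base, attrs[i]);
			f = fopen(path, "r");
			if (!f)
				continue;
			if (fgets(buf, sizeof(buf), f))
				printf("%s: %s", attrs[i], buf);
			fclose(f);
		}

		return 0;
	}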
|
D | xe_gt_sriov_vf.c |
     50  static int vf_reset_guc_state(struct xe_gt *gt)
     52  struct xe_guc *guc = &gt->uc.guc;
     57  xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
     63  * @gt: the &xe_gt
     69  int xe_gt_sriov_vf_reset(struct xe_gt *gt)
     71  if (!xe_device_uc_enabled(gt_to_xe(gt)))
     74  return vf_reset_guc_state(gt);
    110  static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
    112  struct xe_device *xe = gt_to_xe(gt);
    130  static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
    [all …]
|
D | xe_gt.h |
     22  #define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)
     31  int xe_gt_init_hwconfig(struct xe_gt *gt);
     32  int xe_gt_init_early(struct xe_gt *gt);
     33  int xe_gt_init(struct xe_gt *gt);
     34  void xe_gt_mmio_init(struct xe_gt *gt);
     35  void xe_gt_declare_wedged(struct xe_gt *gt);
     36  int xe_gt_record_default_lrcs(struct xe_gt *gt);
     41  * @gt: GT structure
     43  * Walk the available HW engines from gt->info.engine_mask and calculate data
     48  void xe_gt_record_user_engines(struct xe_gt *gt);
    [all …]
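CCS_MASK() shifts the compute-engine bits of gt->info.engine_mask down so that bit 0 corresponds to CCS0, which makes counting or iterating compute engines a plain bit operation. A minimal sketch (the helper name is made up for illustration):

	/* Sketch: number of compute (CCS) engines on a GT, via CCS_MASK(). */
	static unsigned int xe_gt_num_ccs_engines(struct xe_gt *gt)
	{
		return hweight64(CCS_MASK(gt));
	}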
|
D | xe_gt_sriov_pf_config.h |
     15  u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid);
     16  int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size);
     17  int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt,
     19  int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt,
     22  u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid);
     23  int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs);
     24  int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
     25  int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
     28  u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid);
     29  int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs);
    [all …]
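Taken together, the setters above outline a PF provisioning flow; a hedged sketch that hands one VF a fixed GGTT/context/doorbell budget (the sizes and the wrapper name are made up for illustration; VF numbering starts at 1, with 0 reserved for the PF, consistent with the PFID assertion in xe_gt_sriov_pf_control.c above):

	/* Sketch: provision VF1 with an arbitrary resource budget. */
	static int pf_provision_vf1_example(struct xe_gt *gt)
	{
		unsigned int vfid = 1;	/* VFs are 1-based; 0 is the PF */
		int err;

		err = xe_gt_sriov_pf_config_set_ggtt(gt, vfid, SZ_256M);
		if (err)
			return err;

		err = xe_gt_sriov_pf_config_set_ctxs(gt, vfid, 1024);
		if (err)
			return err;

		return xe_gt_sriov_pf_config_set_dbs(gt, vfid, 64);
	}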
|
D | xe_gt_idle.c |
     21  * DOC: Xe GT Idle
     23  * Contains functions that init GT idle features like C6
     25  * device/gt#/gtidle/name - name of the state
     26  * device/gt#/gtidle/idle_residency_ms - Provides residency of the idle state in ms
     27  * device/gt#/gtidle/idle_status - Provides current idle state
     51  struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
     53  return gt_to_xe(gt);
     60  return "gt-c0";
     62  return "gt-c6";
     98  void xe_gt_idle_enable_pg(struct xe_gt *gt)
    [all …]
|
D | xe_gt_mcr.c |
     19  * DOC: GT Multicast/Replicated (MCR) Register Support
     21  * Some GT registers are designed as "multicast" or "replicated" registers:
    251  static void init_steering_l3bank(struct xe_gt *gt)
    253  struct xe_mmio *mmio = &gt->mmio;
    255  if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
    265  gt->steering[L3BANK].group_target = __ffs(mslice_mask);
    266  gt->steering[L3BANK].instance_target =
    268  } else if (gt_to_xe(gt)->info.platform == XE_DG2) {
    278  gt->steering[L3BANK].group_target = (bank >> 2) & 0x7;
    279  gt->steering[L3BANK].instance_target = bank & 0x3;
    [all …]
|
D | xe_gt_sriov_pf_service.c |
     23  static void pf_init_versions(struct xe_gt *gt)
     29  gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
     30  gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;
     33  gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
     34  gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
     38  static int pf_negotiate_version(struct xe_gt *gt,
     42  struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base;
     43  struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest;
     45  xe_gt_assert(gt, base.major);
     46  xe_gt_assert(gt, base.major <= latest.major);
    [all …]
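pf_negotiate_version() reconciles what a VF asks for against the base..latest window initialized above. The actual rules are not visible in this excerpt; the sketch below is one plausible policy, offered only to illustrate the idea, not as the driver's algorithm (field widths are assumed compatible with u32):

	/*
	 * Sketch of one plausible negotiation policy (not necessarily what
	 * pf_negotiate_version() implements): cap requests newer than 'latest'
	 * to 'latest', reject requests older than 'base', accept the rest.
	 */
	static int negotiate_version_example(struct xe_gt_sriov_pf_service_version base,
					     struct xe_gt_sriov_pf_service_version latest,
					     u32 wanted_major, u32 wanted_minor,
					     u32 *major, u32 *minor)
	{
		if (wanted_major > latest.major ||
		    (wanted_major == latest.major && wanted_minor > latest.minor)) {
			*major = latest.major;
			*minor = latest.minor;
			return 0;
		}

		if (wanted_major < base.major ||
		    (wanted_major == base.major && wanted_minor < base.minor))
			return -EINVAL;

		*major = wanted_major;
		*minor = wanted_minor;
		return 0;
	}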
|
/linux-6.14.4/drivers/gpu/drm/i915/gt/ |
D | intel_gt_pm.c |
     28  static void user_forcewake(struct intel_gt *gt, bool suspend)
     30  int count = atomic_read(&gt->user_wakeref);
     37  wakeref = intel_gt_pm_get(gt);
     39  GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
     40  atomic_sub(count, &gt->wakeref.count);
     42  atomic_add(count, &gt->wakeref.count);
     44  intel_gt_pm_put(gt, wakeref);
     47  static void runtime_begin(struct intel_gt *gt)
     50  write_seqcount_begin(&gt->stats.lock);
     51  gt->stats.start = ktime_get();
    [all …]
|
D | intel_gt.c |
     40  void intel_gt_common_init_early(struct intel_gt *gt)
     42  spin_lock_init(gt->irq_lock);
     44  INIT_LIST_HEAD(&gt->closed_vma);
     45  spin_lock_init(&gt->closed_lock);
     47  init_llist_head(&gt->watchdog.list);
     48  INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);
     50  intel_gt_init_buffer_pool(gt);
     51  intel_gt_init_reset(gt);
     52  intel_gt_init_requests(gt);
     53  intel_gt_init_timelines(gt);
    [all …]
|
D | intel_gt_irq.c |
     31  gen11_gt_engine_identity(struct intel_gt *gt,
     34  void __iomem * const regs = intel_uncore_regs(gt->uncore);
     38  lockdep_assert_held(gt->irq_lock);
     53  gt_err(gt, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
     65  gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
     68  struct intel_gt *media_gt = gt->i915->media_gt;
     71  return guc_irq_handler(gt_to_guc(gt), iir);
     76  return gen11_rps_irq_handler(&gt->rps, iir);
     81  return intel_pxp_irq_handler(gt->i915->pxp, iir);
     84  return intel_gsc_irq_handler(gt, iir);
    [all …]
|
D | intel_gt_pm.h |
     14  static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
     16  return intel_wakeref_is_active(&gt->wakeref);
     19  static inline void intel_gt_pm_get_untracked(struct intel_gt *gt)
     21  intel_wakeref_get(&gt->wakeref);
     24  static inline intel_wakeref_t intel_gt_pm_get(struct intel_gt *gt)
     26  intel_gt_pm_get_untracked(gt);
     27  return intel_wakeref_track(&gt->wakeref);
     30  static inline void __intel_gt_pm_get(struct intel_gt *gt)
     32  __intel_wakeref_get(&gt->wakeref);
     35  static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
    [all …]
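The usual calling pattern for these helpers is visible in intel_gt_pm.c above: take a wakeref before touching GT hardware and return the same cookie when done. A minimal sketch (the function name and body are illustrative):

	/* Sketch: keep the GT awake across a hardware access. */
	static void poke_gt_example(struct intel_gt *gt)
	{
		intel_wakeref_t wakeref;

		wakeref = intel_gt_pm_get(gt);	/* may wake the device, can sleep */

		/* ... access registers / submit work while the GT is held awake ... */

		intel_gt_pm_put(gt, wakeref);
	}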
|
D | intel_reset.c |
     15  #include "gt/intel_gt_regs.h"
     17  #include "gt/uc/intel_gsc_fw.h"
    158  static int i915_do_reset(struct intel_gt *gt,
    162  struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
    187  static int g33_do_reset(struct intel_gt *gt,
    191  struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
    197  static int g4x_do_reset(struct intel_gt *gt,
    201  struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
    202  struct intel_uncore *uncore = gt->uncore;
    213  GT_TRACE(gt, "Wait for media reset failed\n");
    [all …]
|
D | intel_gt.h |
     17  * Check that the GT is a graphics GT and has an IP version within the
     20  #define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
     23  ((gt)->type != GT_MEDIA && \
     24  GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
     25  GRAPHICS_VER_FULL((gt)->i915) <= (until)))
     28  * Check that the GT is a media GT and has an IP version within the
     34  #define IS_MEDIA_GT_IP_RANGE(gt, from, until) ( \
     37  ((gt) && (gt)->type == GT_MEDIA && \
     38  MEDIA_VER_FULL((gt)->i915) >= (from) && \
     39  MEDIA_VER_FULL((gt)->i915) <= (until)))
    [all …]
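These range checks are what workaround and tuning code typically keys off. A hedged sketch of the calling pattern (the function name and the IP version range are arbitrary, chosen only for illustration):

	/* Sketch: gate a setting on the graphics IP version range of a GT. */
	static bool needs_example_tuning(struct intel_gt *gt)
	{
		return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74));
	}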
|
D | intel_gt_mcr.c |
     13  * DOC: GT Multicast/Replicated (MCR) Register Support
     15  * Some GT registers are designed as "multicast" or "replicated" registers:
    108  void intel_gt_mcr_init(struct intel_gt *gt)
    110  struct drm_i915_private *i915 = gt->i915;
    114  spin_lock_init(&gt->mcr_lock);
    121  gt->info.mslice_mask =
    122  intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
    124  gt->info.mslice_mask |=
    125  (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
    128  if (!gt->info.mslice_mask) /* should be impossible! */
    [all …]
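After intel_gt_mcr_init() has set up the steering tables and mcr_lock, MCR registers are normally read through the steering-aware helpers rather than a plain intel_uncore_read(). A hedged sketch, assuming intel_gt_mcr_read_any() and the i915_mcr_reg_t type keep the signatures used elsewhere in this directory:

	/* Sketch: read a replicated register from any non-terminated instance. */
	static u32 read_mcr_example(struct intel_gt *gt, i915_mcr_reg_t reg)
	{
		/* The helper picks a valid instance and handles steering/locking. */
		return intel_gt_mcr_read_any(gt, reg);
	}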
|
D | intel_gt_pm_irq.c |
     13  static void write_pm_imr(struct intel_gt *gt)
     15  struct drm_i915_private *i915 = gt->i915;
     16  struct intel_uncore *uncore = gt->uncore;
     17  u32 mask = gt->pm_imr;
     32  static void gen6_gt_pm_update_irq(struct intel_gt *gt,
     40  lockdep_assert_held(gt->irq_lock);
     42  new_val = gt->pm_imr;
     46  if (new_val != gt->pm_imr) {
     47  gt->pm_imr = new_val;
     48  write_pm_imr(gt);
    [all …]
|
D | selftest_reset.c |
     18  __igt_reset_stolen(struct intel_gt *gt,
     22  struct i915_ggtt *ggtt = gt->ggtt;
     23  const struct resource *dsm = &gt->i915->dsm.stolen;
     51  igt_global_reset_lock(gt);
     52  wakeref = intel_runtime_pm_get(gt->uncore->rpm);
     54  err = igt_spinner_init(&spin, gt);
     58  for_each_engine(engine, gt, id) {
     89  i915_gem_get_pat_index(gt->i915,
     98  if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
    114  intel_gt_reset(gt, mask, NULL);
    [all …]
|
/linux-6.14.4/drivers/gpu/drm/xe/tests/ |
D | xe_gt_sriov_pf_service_test.c |
     20  struct xe_gt *gt;
     28  gt = xe_device_get_gt(xe, 0);
     29  pf_init_versions(gt);
     36  KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.base.major);
     37  KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.latest.major);
     38  KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.major,
     39  gt->sriov.pf.service.version.latest.major);
     40  if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major)
     41  KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.minor,
     42  gt->sriov.pf.service.version.latest.minor);
    [all …]
|
/linux-6.14.4/drivers/gpu/drm/i915/ |
D | Makefile |
     76  gt-y += \
     77  	gt/gen2_engine_cs.o \
     78  	gt/gen6_engine_cs.o \
     79  	gt/gen6_ppgtt.o \
     80  	gt/gen7_renderclear.o \
     81  	gt/gen8_engine_cs.o \
     82  	gt/gen8_ppgtt.o \
     83  	gt/intel_breadcrumbs.o \
     84  	gt/intel_context.o \
     85  	gt/intel_context_sseu.o \
    [all …]
|