Lines Matching +full:ext +full:- +full:regs

1 // SPDX-License-Identifier: MIT
3 * Copyright © 2023-2024 Intel Corporation
17 #include "regs/xe_engine_regs.h"
18 #include "regs/xe_gt_regs.h"
19 #include "regs/xe_oa_regs.h"
64 const struct xe_oa_reg *regs; member
139 return tail >= head ? tail - head : in xe_oa_circ_diff()
140 tail + stream->oa_buffer.circ_size - head; in xe_oa_circ_diff()
145 return ptr + n >= stream->oa_buffer.circ_size ? in xe_oa_circ_incr()
146 ptr + n - stream->oa_buffer.circ_size : ptr + n; in xe_oa_circ_incr()
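
The two helpers above (xe_oa_circ_diff() and xe_oa_circ_incr()) are plain modular arithmetic over a circular buffer whose size need not be a power of two. Below is a minimal userspace sketch of that arithmetic only, with made-up values; it is not driver code and does not depend on the xe structures.

#include <assert.h>

/* Distance from head to tail in a circular buffer of 'size' bytes. */
static unsigned int circ_diff(unsigned int tail, unsigned int head, unsigned int size)
{
	return tail >= head ? tail - head : tail + size - head;
}

/* Advance 'ptr' by 'n' bytes, wrapping at 'size'. */
static unsigned int circ_incr(unsigned int ptr, unsigned int n, unsigned int size)
{
	return ptr + n >= size ? ptr + n - size : ptr + n;
}

int main(void)
{
	const unsigned int size = 96;	/* hypothetical non-power-of-2 circ_size */

	assert(circ_diff(64, 32, size) == 32);	/* no wrap */
	assert(circ_diff(16, 64, size) == 48);	/* tail has wrapped past head */
	assert(circ_incr(80, 32, size) == 16);	/* increment wraps */
	return 0;
}
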
154 kfree(oa_config->regs); in xe_oa_config_release()
164 kref_put(&oa_config->ref, xe_oa_config_release); in xe_oa_config_put()
169 return kref_get_unless_zero(&oa_config->ref) ? oa_config : NULL; in xe_oa_config_get()
177 oa_config = idr_find(&oa->metrics_idr, metrics_set); in xe_oa_get_oa_config()
187 xe_oa_config_put(oa_bo->oa_config); in free_oa_config_bo()
188 xe_bb_free(oa_bo->bb, last_fence); in free_oa_config_bo()
194 return &stream->hwe->oa_unit->regs; in __oa_regs()
199 return xe_mmio_read32(&stream->gt->mmio, __oa_regs(stream)->oa_tail_ptr) & in xe_oa_hw_tail_read()
204 ((__s)->oa_buffer.format->header == HDR_64_BIT)
236 u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); in xe_oa_buffer_check_unlocked()
238 int report_size = stream->oa_buffer.format->size; in xe_oa_buffer_check_unlocked()
241 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in xe_oa_buffer_check_unlocked()
244 hw_tail -= gtt_offset; in xe_oa_buffer_check_unlocked()
251 partial_report_size = xe_oa_circ_diff(stream, hw_tail, stream->oa_buffer.tail); in xe_oa_buffer_check_unlocked()
267 while (xe_oa_circ_diff(stream, tail, stream->oa_buffer.tail) >= report_size) { in xe_oa_buffer_check_unlocked()
268 void *report = stream->oa_buffer.vaddr + tail; in xe_oa_buffer_check_unlocked()
277 drm_dbg(&stream->oa->xe->drm, in xe_oa_buffer_check_unlocked()
279 stream->oa_buffer.head, tail, hw_tail); in xe_oa_buffer_check_unlocked()
281 stream->oa_buffer.tail = tail; in xe_oa_buffer_check_unlocked()
283 available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head); in xe_oa_buffer_check_unlocked()
284 stream->pollin = available >= stream->wait_num_reports * report_size; in xe_oa_buffer_check_unlocked()
286 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in xe_oa_buffer_check_unlocked()
288 return stream->pollin; in xe_oa_buffer_check_unlocked()
297 wake_up(&stream->poll_wq); in xe_oa_poll_check_timer_cb()
299 hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns)); in xe_oa_poll_check_timer_cb()
307 int report_size = stream->oa_buffer.format->size; in xe_oa_append_report()
311 if ((count - *offset) < report_size) in xe_oa_append_report()
312 return -ENOSPC; in xe_oa_append_report()
316 oa_buf_end = stream->oa_buffer.vaddr + stream->oa_buffer.circ_size; in xe_oa_append_report()
317 report_size_partial = oa_buf_end - report; in xe_oa_append_report()
321 return -EFAULT; in xe_oa_append_report()
324 if (copy_to_user(buf, stream->oa_buffer.vaddr, in xe_oa_append_report()
325 report_size - report_size_partial)) in xe_oa_append_report()
326 return -EFAULT; in xe_oa_append_report()
328 return -EFAULT; in xe_oa_append_report()
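
When a report straddles the end of the OA buffer, xe_oa_append_report() copies it out in two pieces: the bytes up to the end of the buffer, then the remainder from the start (the pair of copy_to_user() calls above). A userspace-style sketch of that split copy, with memcpy() standing in for copy_to_user() and hypothetical sizes:

#include <assert.h>
#include <string.h>

/* Copy one report of 'report_size' bytes starting at offset 'tail' out of a
 * circular buffer of 'circ_size' bytes, splitting the copy if it wraps. */
static void copy_report(void *dst, const unsigned char *buf,
			unsigned int tail, unsigned int report_size,
			unsigned int circ_size)
{
	unsigned int part = circ_size - tail;	/* bytes until the end of the buffer */

	if (part >= report_size) {
		memcpy(dst, buf + tail, report_size);
	} else {
		memcpy(dst, buf + tail, part);
		memcpy((unsigned char *)dst + part, buf, report_size - part);
	}
}

int main(void)
{
	unsigned char buf[64], out[16];

	for (unsigned int i = 0; i < sizeof(buf); i++)
		buf[i] = (unsigned char)i;

	copy_report(out, buf, 56, sizeof(out), sizeof(buf));	/* wraps: 8 + 8 bytes */
	assert(out[0] == 56 && out[7] == 63 && out[8] == 0 && out[15] == 7);
	return 0;
}
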
339 int report_size = stream->oa_buffer.format->size; in xe_oa_append_reports()
340 u8 *oa_buf_base = stream->oa_buffer.vaddr; in xe_oa_append_reports()
341 u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); in xe_oa_append_reports()
347 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in xe_oa_append_reports()
348 head = stream->oa_buffer.head; in xe_oa_append_reports()
349 tail = stream->oa_buffer.tail; in xe_oa_append_reports()
350 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in xe_oa_append_reports()
352 xe_assert(stream->oa->xe, in xe_oa_append_reports()
353 head < stream->oa_buffer.circ_size && tail < stream->oa_buffer.circ_size); in xe_oa_append_reports()
363 if (!(stream->oa_buffer.circ_size % report_size)) { in xe_oa_append_reports()
368 u8 *oa_buf_end = stream->oa_buffer.vaddr + stream->oa_buffer.circ_size; in xe_oa_append_reports()
369 u32 part = oa_buf_end - report; in xe_oa_append_reports()
376 memset(oa_buf_base, 0, report_size - part); in xe_oa_append_reports()
382 struct xe_reg oaheadptr = __oa_regs(stream)->oa_head_ptr; in xe_oa_append_reports()
384 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in xe_oa_append_reports()
385 xe_mmio_write32(&stream->gt->mmio, oaheadptr, in xe_oa_append_reports()
387 stream->oa_buffer.head = head; in xe_oa_append_reports()
388 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in xe_oa_append_reports()
396 u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); in xe_oa_init_oa_buffer()
397 int size_exponent = __ffs(stream->oa_buffer.bo->size); in xe_oa_init_oa_buffer()
399 struct xe_mmio *mmio = &stream->gt->mmio; in xe_oa_init_oa_buffer()
407 size_exponent > 24 ? size_exponent - 20 : size_exponent - 17); in xe_oa_init_oa_buffer()
409 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in xe_oa_init_oa_buffer()
411 xe_mmio_write32(mmio, __oa_regs(stream)->oa_status, 0); in xe_oa_init_oa_buffer()
412 xe_mmio_write32(mmio, __oa_regs(stream)->oa_head_ptr, in xe_oa_init_oa_buffer()
414 stream->oa_buffer.head = 0; in xe_oa_init_oa_buffer()
419 xe_mmio_write32(mmio, __oa_regs(stream)->oa_buffer, oa_buf); in xe_oa_init_oa_buffer()
420 xe_mmio_write32(mmio, __oa_regs(stream)->oa_tail_ptr, in xe_oa_init_oa_buffer()
424 stream->oa_buffer.tail = 0; in xe_oa_init_oa_buffer()
426 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in xe_oa_init_oa_buffer()
429 memset(stream->oa_buffer.vaddr, 0, stream->oa_buffer.bo->size); in xe_oa_init_oa_buffer()
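
The size_exponent arithmetic a few lines above (in xe_oa_init_oa_buffer()) derives a buffer-size register field from the log2 of the buffer object's size, with a different bias for sizes above 16 MiB. A small sketch reproducing just that arithmetic, assuming power-of-two sizes; what the resulting value encodes in the OA buffer register is a hardware detail not shown in this listing.

#include <assert.h>

/* __builtin_ctz() is used as a userspace stand-in for the kernel's __ffs():
 * both return the 0-based index of the lowest set bit. */
static int oa_buffer_size_field(unsigned int size)
{
	int size_exponent = __builtin_ctz(size);

	/* Same arithmetic as the fragment above: sizes above 16 MiB use a
	 * different bias when encoded into the buffer-size field. */
	return size_exponent > 24 ? size_exponent - 20 : size_exponent - 17;
}

int main(void)
{
	assert(oa_buffer_size_field(1U << 17) == 0);	/* 128 KiB */
	assert(oa_buffer_size_field(1U << 24) == 7);	/* 16 MiB */
	assert(oa_buffer_size_field(1U << 25) == 5);	/* 32 MiB */
	return 0;
}
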
434 return ((format->counter_select << (ffs(counter_sel_mask) - 1)) & counter_sel_mask) | in __format_to_oactrl()
435 REG_FIELD_PREP(OA_OACONTROL_REPORT_BC_MASK, format->bc_report) | in __format_to_oactrl()
436 REG_FIELD_PREP(OA_OACONTROL_COUNTER_SIZE_MASK, format->counter_size); in __format_to_oactrl()
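
__format_to_oactrl() above packs the format's counter select into whichever mask the OA unit uses by shifting the value up to the mask's lowest set bit, the same idea as the REG_FIELD_PREP() calls used for the other two fields. A minimal userspace illustration of that packing, with a made-up mask; ffs() stands in for the kernel helper.

#include <assert.h>
#include <strings.h>	/* ffs() */

/* Place 'val' into the field described by 'mask' (lowest set bit = field LSB). */
static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	return (val << (ffs((int)mask) - 1)) & mask;
}

int main(void)
{
	const unsigned int counter_sel_mask = 0x3c;	/* hypothetical bits 5:2 */

	assert(field_prep(counter_sel_mask, 0x5) == 0x14);	/* 0b0101 shifted into bits 5:2 */
	return 0;
}
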
443 if (stream->hwe->class != XE_ENGINE_CLASS_COMPUTE) in __oa_ccs_select()
446 val = REG_FIELD_PREP(OAG_OACONTROL_OA_CCS_SELECT_MASK, stream->hwe->instance); in __oa_ccs_select()
447 xe_assert(stream->oa->xe, in __oa_ccs_select()
448 REG_FIELD_GET(OAG_OACONTROL_OA_CCS_SELECT_MASK, val) == stream->hwe->instance); in __oa_ccs_select()
454 return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ? in __oactrl_used_bits()
460 const struct xe_oa_format *format = stream->oa_buffer.format; in xe_oa_enable()
461 const struct xe_oa_regs *regs; in xe_oa_enable() local
465 * BSpec: 46822: Bit 0. Even if stream->sample is 0, for OAR to function, the OA in xe_oa_enable()
470 regs = __oa_regs(stream); in xe_oa_enable()
471 val = __format_to_oactrl(format, regs->oa_ctrl_counter_select_mask) | in xe_oa_enable()
474 if (GRAPHICS_VER(stream->oa->xe) >= 20 && in xe_oa_enable()
475 stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG) in xe_oa_enable()
478 xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val); in xe_oa_enable()
483 struct xe_mmio *mmio = &stream->gt->mmio; in xe_oa_disable()
485 xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0); in xe_oa_disable()
486 if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl, in xe_oa_disable()
488 drm_err(&stream->oa->xe->drm, in xe_oa_disable()
491 if (GRAPHICS_VERx100(stream->oa->xe) <= 1270 && GRAPHICS_VERx100(stream->oa->xe) != 1260) { in xe_oa_disable()
495 drm_err(&stream->oa->xe->drm, in xe_oa_disable()
503 if (!stream->periodic) in xe_oa_wait_unlocked()
504 return -EINVAL; in xe_oa_wait_unlocked()
506 return wait_event_interruptible(stream->poll_wq, in xe_oa_wait_unlocked()
516 /* Only clear our bits to avoid side-effects */ in __xe_oa_read()
517 stream->oa_status = xe_mmio_rmw32(&stream->gt->mmio, __oa_regs(stream)->oa_status, in __xe_oa_read()
520 * Signal to userspace that there is non-zero OA status to read via in __xe_oa_read()
523 if (stream->oa_status & OASTATUS_RELEVANT_BITS) in __xe_oa_read()
524 return -EIO; in __xe_oa_read()
532 struct xe_oa_stream *stream = file->private_data; in xe_oa_read()
537 if (!stream->enabled || !stream->sample) in xe_oa_read()
538 return -EINVAL; in xe_oa_read()
540 if (!(file->f_flags & O_NONBLOCK)) { in xe_oa_read()
546 mutex_lock(&stream->stream_lock); in xe_oa_read()
548 mutex_unlock(&stream->stream_lock); in xe_oa_read()
551 mutex_lock(&stream->stream_lock); in xe_oa_read()
553 mutex_unlock(&stream->stream_lock); in xe_oa_read()
558 * before unblocking. The exception to this is if __xe_oa_read returns -ENOSPC, in xe_oa_read()
562 * Also in case of -EIO, we have already waited for data before returning in xe_oa_read()
563 * -EIO, so need to wait again in xe_oa_read()
565 if (ret != -ENOSPC && ret != -EIO) in xe_oa_read()
566 stream->pollin = false; in xe_oa_read()
568 /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, -EINVAL, ... */ in xe_oa_read()
569 return offset ?: (ret ?: -EAGAIN); in xe_oa_read()
577 poll_wait(file, &stream->poll_wq, wait); in xe_oa_poll_locked()
585 if (stream->pollin) in xe_oa_poll_locked()
593 struct xe_oa_stream *stream = file->private_data; in xe_oa_poll()
596 mutex_lock(&stream->stream_lock); in xe_oa_poll()
598 mutex_unlock(&stream->stream_lock); in xe_oa_poll()
605 if (q->vm) { in xe_oa_lock_vma()
606 down_read(&q->vm->lock); in xe_oa_lock_vma()
607 xe_vm_lock(q->vm, false); in xe_oa_lock_vma()
613 if (q->vm) { in xe_oa_unlock_vma()
614 xe_vm_unlock(q->vm); in xe_oa_unlock_vma()
615 up_read(&q->vm->lock); in xe_oa_unlock_vma()
622 struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q; in xe_oa_submit_bb()
634 job->ggtt = true; in xe_oa_submit_bb()
637 for (int i = 0; i < stream->num_syncs && !err; i++) in xe_oa_submit_bb()
638 err = xe_sync_entry_add_deps(&stream->syncs[i], job); in xe_oa_submit_bb()
640 drm_dbg(&stream->oa->xe->drm, "xe_sync_entry_add_deps err %d\n", err); in xe_oa_submit_bb()
646 fence = dma_fence_get(&job->drm.s_fence->finished); in xe_oa_submit_bb()
667 u32 n_lri = min_t(u32, n_regs - i, in write_cs_mi_lri()
670 bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(n_lri); in write_cs_mi_lri()
672 bb->cs[bb->len++] = reg_data[i].addr.addr; in write_cs_mi_lri()
673 bb->cs[bb->len++] = reg_data[i].value; in write_cs_mi_lri()
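
write_cs_mi_lri() above emits MI_LOAD_REGISTER_IMM commands in chunks: one header dword carrying the register count, followed by (address, value) pairs. The sketch below mirrors that chunking into a flat dword array; the header encoding, the per-command limit, and the register addresses/values are placeholders, not the real MI_LOAD_REGISTER_IMM bit layout.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct reg_write { uint32_t addr, value; };

#define MAX_REGS_PER_LRI 4U	/* hypothetical per-command limit */

/* Emit "load register immediate"-style commands into 'cs': each chunk is one
 * header dword followed by (addr, value) pairs, mirroring write_cs_mi_lri(). */
static size_t emit_lri(uint32_t *cs, uint32_t header_opcode,
		       const struct reg_write *regs, uint32_t n_regs)
{
	size_t len = 0;

	for (uint32_t i = 0; i < n_regs; ) {
		uint32_t n_lri = n_regs - i < MAX_REGS_PER_LRI ? n_regs - i : MAX_REGS_PER_LRI;

		cs[len++] = header_opcode | n_lri;	/* placeholder encoding of the count */
		for (uint32_t j = 0; j < n_lri; j++, i++) {
			cs[len++] = regs[i].addr;
			cs[len++] = regs[i].value;
		}
	}
	return len;
}

int main(void)
{
	/* Arbitrary register writes, purely for illustration. */
	struct reg_write regs[6] = { { 0x2b28, 1 }, { 0x2b2c, 2 }, { 0x2b30, 3 },
				     { 0x2b34, 4 }, { 0x2b38, 5 }, { 0x2b3c, 6 } };
	uint32_t cs[2 * 6 + 2];

	/* 6 registers -> chunks of 4 and 2 -> 2 headers + 12 dwords = 14. */
	assert(emit_lri(cs, 0x11000000, regs, 6) == 14);
	return 0;
}
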
691 xe_bo_unpin_map_no_vm(stream->oa_buffer.bo); in xe_oa_free_oa_buffer()
698 xe_oa_config_put(stream->oa_config); in xe_oa_free_configs()
699 llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) in xe_oa_free_configs()
700 free_oa_config_bo(oa_bo, stream->last_fence); in xe_oa_free_configs()
701 dma_fence_put(stream->last_fence); in xe_oa_free_configs()
710 bb = xe_bb_new(stream->gt, 2 * count + 1, false); in xe_oa_load_with_lri()
735 const struct xe_oa_format *format = stream->oa_buffer.format; in xe_oa_configure_oar_context()
741 OACTXCONTROL(stream->hwe->mmio_base), in xe_oa_configure_oar_context()
749 RING_CONTEXT_CONTROL(stream->hwe->mmio_base), in xe_oa_configure_oar_context()
760 const struct xe_oa_format *format = stream->oa_buffer.format; in xe_oa_configure_oac_context()
765 OACTXCONTROL(stream->hwe->mmio_base), in xe_oa_configure_oac_context()
773 RING_CONTEXT_CONTROL(stream->hwe->mmio_base), in xe_oa_configure_oac_context()
781 xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl, in xe_oa_configure_oac_context()
789 switch (stream->hwe->class) { in xe_oa_configure_oa_context()
805 enable && stream && stream->sample ? in oag_configure_mmio_trigger()
811 struct xe_mmio *mmio = &stream->gt->mmio; in xe_oa_disable_metric_set()
818 if (stream->oa->xe->info.platform == XE_DG2) { in xe_oa_disable_metric_set()
819 xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN, in xe_oa_disable_metric_set()
821 xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2, in xe_oa_disable_metric_set()
825 xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug, in xe_oa_disable_metric_set()
829 if (stream->exec_q) in xe_oa_disable_metric_set()
836 (HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0); in xe_oa_disable_metric_set()
844 struct xe_oa_unit *u = stream->hwe->oa_unit; in xe_oa_stream_destroy()
845 struct xe_gt *gt = stream->hwe->gt; in xe_oa_stream_destroy()
847 if (WARN_ON(stream != u->exclusive_stream)) in xe_oa_stream_destroy()
850 WRITE_ONCE(u->exclusive_stream, NULL); in xe_oa_stream_destroy()
852 mutex_destroy(&stream->stream_lock); in xe_oa_stream_destroy()
855 xe_exec_queue_put(stream->k_exec_q); in xe_oa_stream_destroy()
860 xe_pm_runtime_put(stream->oa->xe); in xe_oa_stream_destroy()
863 if (stream->override_gucrc) in xe_oa_stream_destroy()
864 xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc)); in xe_oa_stream_destroy()
867 xe_file_put(stream->xef); in xe_oa_stream_destroy()
874 bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL, in xe_oa_alloc_oa_buffer()
880 stream->oa_buffer.bo = bo; in xe_oa_alloc_oa_buffer()
882 xe_assert(stream->oa->xe, bo->vmap.is_iomem == 0); in xe_oa_alloc_oa_buffer()
883 stream->oa_buffer.vaddr = bo->vmap.vaddr; in xe_oa_alloc_oa_buffer()
896 return ERR_PTR(-ENOMEM); in __xe_oa_alloc_config_buffer()
898 config_length = num_lri_dwords(oa_config->regs_len); in __xe_oa_alloc_config_buffer()
901 bb = xe_bb_new(stream->gt, config_length, false); in __xe_oa_alloc_config_buffer()
905 write_cs_mi_lri(bb, oa_config->regs, oa_config->regs_len); in __xe_oa_alloc_config_buffer()
907 oa_bo->bb = bb; in __xe_oa_alloc_config_buffer()
908 oa_bo->oa_config = xe_oa_config_get(oa_config); in __xe_oa_alloc_config_buffer()
909 llist_add(&oa_bo->node, &stream->oa_config_bos); in __xe_oa_alloc_config_buffer()
923 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { in xe_oa_alloc_config_buffer()
924 if (oa_bo->oa_config == oa_config && in xe_oa_alloc_config_buffer()
925 memcmp(oa_bo->oa_config->uuid, oa_config->uuid, in xe_oa_alloc_config_buffer()
926 sizeof(oa_config->uuid)) == 0) in xe_oa_alloc_config_buffer()
937 dma_fence_put(stream->last_fence); in xe_oa_update_last_fence()
938 stream->last_fence = dma_fence_get(fence); in xe_oa_update_last_fence()
946 dma_fence_signal(&ofence->base); in xe_oa_fence_work_fn()
947 dma_fence_put(&ofence->base); in xe_oa_fence_work_fn()
957 INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn); in xe_oa_config_cb()
958 queue_delayed_work(system_unbound_wq, &ofence->work, in xe_oa_config_cb()
988 err = -ENOMEM; in xe_oa_emit_oa_config()
999 fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb); in xe_oa_emit_oa_config()
1006 spin_lock_init(&ofence->lock); in xe_oa_emit_oa_config()
1007 dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0); in xe_oa_emit_oa_config()
1009 for (i = 0; i < stream->num_syncs; i++) { in xe_oa_emit_oa_config()
1010 if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL) in xe_oa_emit_oa_config()
1012 xe_sync_entry_signal(&stream->syncs[i], &ofence->base); in xe_oa_emit_oa_config()
1017 dma_fence_get(&ofence->base); in xe_oa_emit_oa_config()
1022 /* Add job fence callback to schedule work to signal ofence->base */ in xe_oa_emit_oa_config()
1023 err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb); in xe_oa_emit_oa_config()
1024 xe_gt_assert(stream->gt, !err || err == -ENOENT); in xe_oa_emit_oa_config()
1025 if (err == -ENOENT) in xe_oa_emit_oa_config()
1026 xe_oa_config_cb(fence, &ofence->cb); in xe_oa_emit_oa_config()
1030 dma_fence_wait(&ofence->base, false); in xe_oa_emit_oa_config()
1031 dma_fence_put(&ofence->base); in xe_oa_emit_oa_config()
1035 for (i = 0; i < stream->num_syncs; i++) in xe_oa_emit_oa_config()
1036 xe_sync_entry_cleanup(&stream->syncs[i]); in xe_oa_emit_oa_config()
1037 kfree(stream->syncs); in xe_oa_emit_oa_config()
1049 stream->sample ? in oag_report_ctx_switches()
1056 stream->oa_buffer.bo->size > SZ_16M ? in oag_buf_size_select()
1062 struct xe_mmio *mmio = &stream->gt->mmio; in xe_oa_enable_metric_set()
1071 if (stream->oa->xe->info.platform == XE_DG2) { in xe_oa_enable_metric_set()
1072 xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN, in xe_oa_enable_metric_set()
1074 xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2, in xe_oa_enable_metric_set()
1082 if (GRAPHICS_VER(stream->oa->xe) >= 20) in xe_oa_enable_metric_set()
1089 xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug, in xe_oa_enable_metric_set()
1095 xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ? in xe_oa_enable_metric_set()
1099 stream->period_exponent)) : 0); in xe_oa_enable_metric_set()
1107 (HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0); in xe_oa_enable_metric_set()
1112 if (stream->exec_q) { in xe_oa_enable_metric_set()
1118 return xe_oa_emit_oa_config(stream, stream->oa_config); in xe_oa_enable_metric_set()
1129 for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) { in decode_oa_format()
1130 const struct xe_oa_format *f = &oa->oa_formats[idx]; in decode_oa_format()
1132 if (counter_size == f->counter_size && bc_report == f->bc_report && in decode_oa_format()
1133 type == f->type && counter_sel == f->counter_select) { in decode_oa_format()
1139 return -EINVAL; in decode_oa_format()
1145 if (value >= oa->oa_unit_ids) { in xe_oa_set_prop_oa_unit_id()
1146 drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value); in xe_oa_set_prop_oa_unit_id()
1147 return -EINVAL; in xe_oa_set_prop_oa_unit_id()
1149 param->oa_unit_id = value; in xe_oa_set_prop_oa_unit_id()
1156 param->sample = value; in xe_oa_set_prop_sample_oa()
1163 param->metric_set = value; in xe_oa_set_prop_metric_set()
1170 int ret = decode_oa_format(oa, value, &param->oa_format); in xe_oa_set_prop_oa_format()
1173 drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value); in xe_oa_set_prop_oa_format()
1185 drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX); in xe_oa_set_prop_oa_exponent()
1186 return -EINVAL; in xe_oa_set_prop_oa_exponent()
1188 param->period_exponent = value; in xe_oa_set_prop_oa_exponent()
1195 param->disabled = value; in xe_oa_set_prop_disabled()
1202 param->exec_queue_id = value; in xe_oa_set_prop_exec_queue_id()
1209 param->engine_instance = value; in xe_oa_set_prop_engine_instance()
1216 param->no_preempt = value; in xe_oa_set_no_preempt()
1223 param->num_syncs = value; in xe_oa_set_prop_num_syncs()
1230 param->syncs_user = u64_to_user_ptr(value); in xe_oa_set_prop_syncs_user()
1238 drm_dbg(&oa->xe->drm, "OA buffer size invalid %llu\n", value); in xe_oa_set_prop_oa_buffer_size()
1239 return -EINVAL; in xe_oa_set_prop_oa_buffer_size()
1241 param->oa_buffer_size = value; in xe_oa_set_prop_oa_buffer_size()
1249 drm_dbg(&oa->xe->drm, "wait_num_reports %llu\n", value); in xe_oa_set_prop_wait_num_reports()
1250 return -EINVAL; in xe_oa_set_prop_wait_num_reports()
1252 param->wait_num_reports = value; in xe_oa_set_prop_wait_num_reports()
1259 return -EINVAL; in xe_oa_set_prop_ret_inval()
1300 struct drm_xe_ext_set_property ext; in xe_oa_user_ext_set_property() local
1304 err = __copy_from_user(&ext, address, sizeof(ext)); in xe_oa_user_ext_set_property()
1305 if (XE_IOCTL_DBG(oa->xe, err)) in xe_oa_user_ext_set_property()
1306 return -EFAULT; in xe_oa_user_ext_set_property()
1311 if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) || in xe_oa_user_ext_set_property()
1312 XE_IOCTL_DBG(oa->xe, ext.pad)) in xe_oa_user_ext_set_property()
1313 return -EINVAL; in xe_oa_user_ext_set_property()
1315 idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open)); in xe_oa_user_ext_set_property()
1318 return xe_oa_set_property_funcs_config[idx](oa, ext.value, param); in xe_oa_user_ext_set_property()
1320 return xe_oa_set_property_funcs_open[idx](oa, ext.value, param); in xe_oa_user_ext_set_property()
1334 struct drm_xe_user_extension ext; in xe_oa_user_extensions() local
1338 if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS)) in xe_oa_user_extensions()
1339 return -E2BIG; in xe_oa_user_extensions()
1341 err = __copy_from_user(&ext, address, sizeof(ext)); in xe_oa_user_extensions()
1342 if (XE_IOCTL_DBG(oa->xe, err)) in xe_oa_user_extensions()
1343 return -EFAULT; in xe_oa_user_extensions()
1345 if (XE_IOCTL_DBG(oa->xe, ext.pad) || in xe_oa_user_extensions()
1346 XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs))) in xe_oa_user_extensions()
1347 return -EINVAL; in xe_oa_user_extensions()
1349 idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs)); in xe_oa_user_extensions()
1351 if (XE_IOCTL_DBG(oa->xe, err)) in xe_oa_user_extensions()
1354 if (ext.next_extension) in xe_oa_user_extensions()
1355 return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param); in xe_oa_user_extensions()
1364 if (param->num_syncs && !param->syncs_user) { in xe_oa_parse_syncs()
1365 drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n"); in xe_oa_parse_syncs()
1366 ret = -EINVAL; in xe_oa_parse_syncs()
1370 if (param->num_syncs) { in xe_oa_parse_syncs()
1371 param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL); in xe_oa_parse_syncs()
1372 if (!param->syncs) { in xe_oa_parse_syncs()
1373 ret = -ENOMEM; in xe_oa_parse_syncs()
1378 for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) { in xe_oa_parse_syncs()
1379 ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs], in xe_oa_parse_syncs()
1380 &param->syncs_user[num_syncs], 0); in xe_oa_parse_syncs()
1384 if (xe_sync_is_ufence(&param->syncs[num_syncs])) in xe_oa_parse_syncs()
1388 if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) { in xe_oa_parse_syncs()
1389 ret = -EINVAL; in xe_oa_parse_syncs()
1396 while (num_syncs--) in xe_oa_parse_syncs()
1397 xe_sync_entry_cleanup(&param->syncs[num_syncs]); in xe_oa_parse_syncs()
1398 kfree(param->syncs); in xe_oa_parse_syncs()
1405 stream->pollin = false; in xe_oa_stream_enable()
1409 if (stream->sample) in xe_oa_stream_enable()
1410 hrtimer_start(&stream->poll_check_timer, in xe_oa_stream_enable()
1411 ns_to_ktime(stream->poll_period_ns), in xe_oa_stream_enable()
1419 if (stream->sample) in xe_oa_stream_disable()
1420 hrtimer_cancel(&stream->poll_check_timer); in xe_oa_stream_disable()
1425 struct xe_exec_queue *q = stream->exec_q; in xe_oa_enable_preempt_timeslice()
1429 ret1 = q->ops->set_timeslice(q, stream->hwe->eclass->sched_props.timeslice_us); in xe_oa_enable_preempt_timeslice()
1430 ret2 = q->ops->set_preempt_timeout(q, stream->hwe->eclass->sched_props.preempt_timeout_us); in xe_oa_enable_preempt_timeslice()
1435 drm_dbg(&stream->oa->xe->drm, "%s failed ret1 %d ret2 %d\n", __func__, ret1, ret2); in xe_oa_enable_preempt_timeslice()
1441 struct xe_exec_queue *q = stream->exec_q; in xe_oa_disable_preempt_timeslice()
1445 ret = q->ops->set_timeslice(q, 0); in xe_oa_disable_preempt_timeslice()
1449 ret = q->ops->set_preempt_timeout(q, 0); in xe_oa_disable_preempt_timeslice()
1456 drm_dbg(&stream->oa->xe->drm, "%s failed %d\n", __func__, ret); in xe_oa_disable_preempt_timeslice()
1462 if (stream->enabled) in xe_oa_enable_locked()
1465 if (stream->no_preempt) { in xe_oa_enable_locked()
1474 stream->enabled = true; in xe_oa_enable_locked()
1482 if (!stream->enabled) in xe_oa_disable_locked()
1487 if (stream->no_preempt) in xe_oa_disable_locked()
1490 stream->enabled = false; in xe_oa_disable_locked()
1497 long ret = stream->oa_config->id; in xe_oa_config_locked()
1501 err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, &param); in xe_oa_config_locked()
1505 config = xe_oa_get_oa_config(stream->oa, param.metric_set); in xe_oa_config_locked()
1507 return -ENODEV; in xe_oa_config_locked()
1509 param.xef = stream->xef; in xe_oa_config_locked()
1510 err = xe_oa_parse_syncs(stream->oa, &param); in xe_oa_config_locked()
1514 stream->num_syncs = param.num_syncs; in xe_oa_config_locked()
1515 stream->syncs = param.syncs; in xe_oa_config_locked()
1519 config = xchg(&stream->oa_config, config); in xe_oa_config_locked()
1520 drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n", in xe_oa_config_locked()
1521 stream->oa_config->uuid); in xe_oa_config_locked()
1536 if (stream->oa_status & OASTATUS_REPORT_LOST) in xe_oa_status_locked()
1538 if (stream->oa_status & OASTATUS_BUFFER_OVERFLOW) in xe_oa_status_locked()
1540 if (stream->oa_status & OASTATUS_COUNTER_OVERFLOW) in xe_oa_status_locked()
1542 if (stream->oa_status & OASTATUS_MMIO_TRG_Q_FULL) in xe_oa_status_locked()
1546 return -EFAULT; in xe_oa_status_locked()
1553 struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, }; in xe_oa_info_locked()
1557 return -EFAULT; in xe_oa_info_locked()
1579 return -EINVAL; in xe_oa_ioctl_locked()
1586 struct xe_oa_stream *stream = file->private_data; in xe_oa_ioctl()
1589 mutex_lock(&stream->stream_lock); in xe_oa_ioctl()
1591 mutex_unlock(&stream->stream_lock); in xe_oa_ioctl()
1598 if (stream->enabled) in xe_oa_destroy_locked()
1603 if (stream->exec_q) in xe_oa_destroy_locked()
1604 xe_exec_queue_put(stream->exec_q); in xe_oa_destroy_locked()
1611 struct xe_oa_stream *stream = file->private_data; in xe_oa_release()
1612 struct xe_gt *gt = stream->gt; in xe_oa_release()
1615 mutex_lock(&gt->oa.gt_lock); in xe_oa_release()
1617 mutex_unlock(&gt->oa.gt_lock); in xe_oa_release()
1621 drm_dev_put(&gt_to_xe(gt)->drm); in xe_oa_release()
1628 struct xe_oa_stream *stream = file->private_data; in xe_oa_mmap()
1629 struct xe_bo *bo = stream->oa_buffer.bo; in xe_oa_mmap()
1630 unsigned long start = vma->vm_start; in xe_oa_mmap()
1634 drm_dbg(&stream->oa->xe->drm, "Insufficient privilege to map OA buffer\n"); in xe_oa_mmap()
1635 return -EACCES; in xe_oa_mmap()
1639 if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) { in xe_oa_mmap()
1640 drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n"); in xe_oa_mmap()
1641 return -EINVAL; in xe_oa_mmap()
1648 if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_SHARED | VM_MAYSHARE)) { in xe_oa_mmap()
1649 drm_dbg(&stream->oa->xe->drm, "mmap must be read only\n"); in xe_oa_mmap()
1650 return -EINVAL; in xe_oa_mmap()
1655 xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages == vma_pages(vma)); in xe_oa_mmap()
1656 for (i = 0; i < bo->ttm.ttm->num_pages; i++) { in xe_oa_mmap()
1657 ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]), in xe_oa_mmap()
1658 PAGE_SIZE, vma->vm_page_prot); in xe_oa_mmap()
1680 struct xe_oa_unit *u = param->hwe->oa_unit; in xe_oa_stream_init()
1681 struct xe_gt *gt = param->hwe->gt; in xe_oa_stream_init()
1685 stream->exec_q = param->exec_q; in xe_oa_stream_init()
1686 stream->poll_period_ns = DEFAULT_POLL_PERIOD_NS; in xe_oa_stream_init()
1687 stream->hwe = param->hwe; in xe_oa_stream_init()
1688 stream->gt = stream->hwe->gt; in xe_oa_stream_init()
1689 stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format]; in xe_oa_stream_init()
1691 stream->sample = param->sample; in xe_oa_stream_init()
1692 stream->periodic = param->period_exponent >= 0; in xe_oa_stream_init()
1693 stream->period_exponent = param->period_exponent; in xe_oa_stream_init()
1694 stream->no_preempt = param->no_preempt; in xe_oa_stream_init()
1695 stream->wait_num_reports = param->wait_num_reports; in xe_oa_stream_init()
1697 stream->xef = xe_file_get(param->xef); in xe_oa_stream_init()
1698 stream->num_syncs = param->num_syncs; in xe_oa_stream_init()
1699 stream->syncs = param->syncs; in xe_oa_stream_init()
1703 * of buffer, making the OA buffer effectively a non-power-of-2 size circular in xe_oa_stream_init()
1706 if (GRAPHICS_VER(stream->oa->xe) >= 20 && in xe_oa_stream_init()
1707 stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample) in xe_oa_stream_init()
1708 stream->oa_buffer.circ_size = in xe_oa_stream_init()
1709 param->oa_buffer_size - in xe_oa_stream_init()
1710 param->oa_buffer_size % stream->oa_buffer.format->size; in xe_oa_stream_init()
1712 stream->oa_buffer.circ_size = param->oa_buffer_size; in xe_oa_stream_init()
1714 stream->oa_config = xe_oa_get_oa_config(stream->oa, param->metric_set); in xe_oa_stream_init()
1715 if (!stream->oa_config) { in xe_oa_stream_init()
1716 drm_dbg(&stream->oa->xe->drm, "Invalid OA config id=%i\n", param->metric_set); in xe_oa_stream_init()
1717 ret = -EINVAL; in xe_oa_stream_init()
1727 if (stream->oa->xe->info.platform == XE_PVC) { in xe_oa_stream_init()
1728 ret = xe_guc_pc_override_gucrc_mode(&gt->uc.guc.pc, in xe_oa_stream_init()
1733 stream->override_gucrc = true; in xe_oa_stream_init()
1737 xe_pm_runtime_get(stream->oa->xe); in xe_oa_stream_init()
1740 ret = -ETIMEDOUT; in xe_oa_stream_init()
1744 ret = xe_oa_alloc_oa_buffer(stream, param->oa_buffer_size); in xe_oa_stream_init()
1748 stream->k_exec_q = xe_exec_queue_create(stream->oa->xe, NULL, in xe_oa_stream_init()
1749 BIT(stream->hwe->logical_instance), 1, in xe_oa_stream_init()
1750 stream->hwe, EXEC_QUEUE_FLAG_KERNEL, 0); in xe_oa_stream_init()
1751 if (IS_ERR(stream->k_exec_q)) { in xe_oa_stream_init()
1752 ret = PTR_ERR(stream->k_exec_q); in xe_oa_stream_init()
1753 drm_err(&stream->oa->xe->drm, "gt%d, hwe %s, xe_exec_queue_create failed=%d", in xe_oa_stream_init()
1754 stream->gt->info.id, stream->hwe->name, ret); in xe_oa_stream_init()
1760 drm_dbg(&stream->oa->xe->drm, "Unable to enable metric set\n"); in xe_oa_stream_init()
1764 drm_dbg(&stream->oa->xe->drm, "opening stream oa config uuid=%s\n", in xe_oa_stream_init()
1765 stream->oa_config->uuid); in xe_oa_stream_init()
1767 WRITE_ONCE(u->exclusive_stream, stream); in xe_oa_stream_init()
1769 hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in xe_oa_stream_init()
1770 stream->poll_check_timer.function = xe_oa_poll_check_timer_cb; in xe_oa_stream_init()
1771 init_waitqueue_head(&stream->poll_wq); in xe_oa_stream_init()
1773 spin_lock_init(&stream->oa_buffer.ptr_lock); in xe_oa_stream_init()
1774 mutex_init(&stream->stream_lock); in xe_oa_stream_init()
1780 xe_exec_queue_put(stream->k_exec_q); in xe_oa_stream_init()
1785 xe_pm_runtime_put(stream->oa->xe); in xe_oa_stream_init()
1786 if (stream->override_gucrc) in xe_oa_stream_init()
1787 xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc)); in xe_oa_stream_init()
1791 xe_file_put(stream->xef); in xe_oa_stream_init()
1803 if (param->hwe->oa_unit->exclusive_stream) { in xe_oa_stream_open_ioctl_locked()
1804 drm_dbg(&oa->xe->drm, "OA unit already in use\n"); in xe_oa_stream_open_ioctl_locked()
1805 ret = -EBUSY; in xe_oa_stream_open_ioctl_locked()
1811 ret = -ENOMEM; in xe_oa_stream_open_ioctl_locked()
1815 stream->oa = oa; in xe_oa_stream_open_ioctl_locked()
1820 if (!param->disabled) { in xe_oa_stream_open_ioctl_locked()
1833 drm_dev_get(&stream->oa->xe->drm); in xe_oa_stream_open_ioctl_locked()
1837 if (!param->disabled) in xe_oa_stream_open_ioctl_locked()
1848 * xe_oa_timestamp_frequency - Return OA timestamp frequency
1864 switch (gt_to_xe(gt)->info.platform) { in xe_oa_timestamp_frequency()
1869 reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0); in xe_oa_timestamp_frequency()
1873 return gt->info.reference_clock << (3 - shift); in xe_oa_timestamp_frequency()
1876 return gt->info.reference_clock; in xe_oa_timestamp_frequency()
1885 return div_u64(nom + den - 1, den); in oa_exponent_to_ns()
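
The div_u64(nom + den - 1, den) expression in oa_exponent_to_ns() is the usual round-up (ceiling) integer division. A small worked sketch, with hypothetical numbers loosely modelled on converting OA timestamp ticks to nanoseconds (the actual nom/den terms are not visible in this listing):

#include <assert.h>
#include <stdint.h>

/* Round-up integer division: ceil(nom / den). */
static uint64_t div_round_up_u64(uint64_t nom, uint64_t den)
{
	return (nom + den - 1) / den;
}

int main(void)
{
	/* Hypothetical: 64 OA timestamp ticks at a 24 MHz timestamp clock. */
	uint64_t ticks = 64, freq_hz = 24000000, nsec_per_sec = 1000000000;

	assert(div_round_up_u64(ticks * nsec_per_sec, freq_hz) == 2667);	/* 2666.67 ns rounded up */
	assert(div_round_up_u64(10, 5) == 2);	/* exact division is unchanged */
	return 0;
}
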
1890 switch (hwe->oa_unit->type) { in engine_supports_oa_format()
1902 * xe_oa_unit_id - Return OA unit ID for a hardware engine
1909 return hwe->oa_unit && hwe->oa_unit->num_engines ? in xe_oa_unit_id()
1910 hwe->oa_unit->oa_unit_id : U16_MAX; in xe_oa_unit_id()
1918 if (param->exec_q) { in xe_oa_assign_hwe()
1920 param->hwe = xe_gt_hw_engine(param->exec_q->gt, param->exec_q->class, in xe_oa_assign_hwe()
1921 param->engine_instance, true); in xe_oa_assign_hwe()
1927 for_each_gt(gt, oa->xe, i) { in xe_oa_assign_hwe()
1929 if (xe_oa_unit_id(hwe) == param->oa_unit_id) { in xe_oa_assign_hwe()
1930 param->hwe = hwe; in xe_oa_assign_hwe()
1937 if (!param->hwe || xe_oa_unit_id(param->hwe) != param->oa_unit_id) { in xe_oa_assign_hwe()
1938 drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n", in xe_oa_assign_hwe()
1939 param->exec_q ? param->exec_q->class : -1, in xe_oa_assign_hwe()
1940 param->engine_instance, param->oa_unit_id); in xe_oa_assign_hwe()
1941 ret = -EINVAL; in xe_oa_assign_hwe()
1948 * xe_oa_stream_open_ioctl - Opens an OA stream
1960 struct xe_oa *oa = &xe->oa; in xe_oa_stream_open_ioctl()
1967 if (!oa->xe) { in xe_oa_stream_open_ioctl()
1968 drm_dbg(&xe->drm, "xe oa interface not available for this system\n"); in xe_oa_stream_open_ioctl()
1969 return -ENODEV; in xe_oa_stream_open_ioctl()
1973 param.period_exponent = -1; in xe_oa_stream_open_ioctl()
1980 if (XE_IOCTL_DBG(oa->xe, !param.exec_q)) in xe_oa_stream_open_ioctl()
1981 return -ENOENT; in xe_oa_stream_open_ioctl()
1983 if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1)) in xe_oa_stream_open_ioctl()
1984 return -EOPNOTSUPP; in xe_oa_stream_open_ioctl()
1996 drm_dbg(&oa->xe->drm, "Preemption disable without exec_q!\n"); in xe_oa_stream_open_ioctl()
1997 ret = -EINVAL; in xe_oa_stream_open_ioctl()
2004 drm_dbg(&oa->xe->drm, "Insufficient privileges to open xe OA stream\n"); in xe_oa_stream_open_ioctl()
2005 ret = -EACCES; in xe_oa_stream_open_ioctl()
2010 drm_dbg(&oa->xe->drm, "Only OA report sampling supported\n"); in xe_oa_stream_open_ioctl()
2011 ret = -EINVAL; in xe_oa_stream_open_ioctl()
2019 f = &oa->oa_formats[param.oa_format]; in xe_oa_stream_open_ioctl()
2020 if (!param.oa_format || !f->size || in xe_oa_stream_open_ioctl()
2021 !engine_supports_oa_format(param.hwe, f->type)) { in xe_oa_stream_open_ioctl()
2022 drm_dbg(&oa->xe->drm, "Invalid OA format %d type %d size %d for class %d\n", in xe_oa_stream_open_ioctl()
2023 param.oa_format, f->type, f->size, param.hwe->class); in xe_oa_stream_open_ioctl()
2024 ret = -EINVAL; in xe_oa_stream_open_ioctl()
2033 drm_dbg(&oa->xe->drm, "OA_EXPONENT specified without SAMPLE_OA\n"); in xe_oa_stream_open_ioctl()
2034 ret = -EINVAL; in xe_oa_stream_open_ioctl()
2037 oa_period = oa_exponent_to_ns(param.hwe->gt, param.period_exponent); in xe_oa_stream_open_ioctl()
2039 drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz); in xe_oa_stream_open_ioctl()
2047 if (param.wait_num_reports > param.oa_buffer_size / f->size) { in xe_oa_stream_open_ioctl()
2048 drm_dbg(&oa->xe->drm, "wait_num_reports %d\n", param.wait_num_reports); in xe_oa_stream_open_ioctl()
2049 ret = -EINVAL; in xe_oa_stream_open_ioctl()
2057 mutex_lock(&param.hwe->gt->oa.gt_lock); in xe_oa_stream_open_ioctl()
2059 mutex_unlock(&param.hwe->gt->oa.gt_lock); in xe_oa_stream_open_ioctl()
2066 while (param.num_syncs--) in xe_oa_stream_open_ioctl()
2097 while (table->start && table->end) { in xe_oa_reg_in_range_table()
2098 if (addr >= table->start && addr <= table->end) in xe_oa_reg_in_range_table()
2109 { .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
2115 { .start = 0xd900, .end = 0xd91c }, /* OAG_OASTARTTRIG[1-8] */
2116 { .start = 0xd920, .end = 0xd93c }, /* OAG_OAREPORTTRIG1[1-8] */
2117 { .start = 0xd940, .end = 0xd97c }, /* OAG_CEC[0-7][0-1] */
2118 { .start = 0xdc00, .end = 0xdc3c }, /* OAG_SCEC[0-7][0-1] */
2125 { .start = 0x393000, .end = 0x39301c }, /* OAM_STARTTRIG1[1-8] */
2126 { .start = 0x393020, .end = 0x39303c }, /* OAM_REPORTTRIG1[1-8] */
2127 { .start = 0x393040, .end = 0x39307c }, /* OAM_CEC[0-7][0-1] */
2128 { .start = 0x393200, .end = 0x39323C }, /* MPES[0-7] */
2133 { .start = 0x393200, .end = 0x39323C }, /* MPES_0_MPES_SAG - MPES_7_UPPER_MPES_SAG */
2134 { .start = 0x394200, .end = 0x39423C }, /* MPES_0_MPES_SCMI0 - MPES_7_UPPER_MPES_SCMI0 */
2135 { .start = 0x394A00, .end = 0x394A3C }, /* MPES_0_MPES_SCMI1 - MPES_7_UPPER_MPES_SCMI1 */
2144 (GRAPHICS_VER(oa->xe) >= 20 && in xe_oa_is_valid_b_counter_addr()
2149 { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
2150 { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
2158 { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
2159 { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
2173 { .start = 0x13000, .end = 0x137FC }, /* PES_0_PESL0 - PES_63_UPPER_PESL3 */
2179 if (GRAPHICS_VER(oa->xe) >= 20) in xe_oa_is_valid_mux_addr()
2181 else if (GRAPHICS_VERx100(oa->xe) >= 1270) in xe_oa_is_valid_mux_addr()
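
The whitelists above are {start, end} tables terminated by an all-zero entry, and xe_oa_reg_in_range_table() (the while loop near the top of this block) walks them to validate a register address. A self-contained sketch of the same lookup, with placeholder ranges loosely modelled on the tables above:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct reg_range {
	uint32_t start;	/* inclusive */
	uint32_t end;	/* inclusive */
};

/* Table is terminated by an all-zero entry, as in the xe_oa tables. */
static bool reg_in_range_table(uint32_t addr, const struct reg_range *table)
{
	while (table->start && table->end) {
		if (addr >= table->start && addr <= table->end)
			return true;
		table++;
	}
	return false;
}

int main(void)
{
	static const struct reg_range ranges[] = {
		{ .start = 0xd900, .end = 0xd93c },
		{ .start = 0x2b28, .end = 0x2b28 },
		{}
	};

	assert(reg_in_range_table(0xd920, ranges));
	assert(!reg_in_range_table(0xd940, ranges));
	return 0;
}
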
2196 u32 __user *regs, u32 n_regs) in xe_oa_alloc_regs() argument
2204 return ERR_PTR(-ENOMEM); in xe_oa_alloc_regs()
2209 err = get_user(addr, regs); in xe_oa_alloc_regs()
2214 drm_dbg(&oa->xe->drm, "Invalid oa_reg address: %X\n", addr); in xe_oa_alloc_regs()
2215 err = -EINVAL; in xe_oa_alloc_regs()
2219 err = get_user(value, regs + 1); in xe_oa_alloc_regs()
2226 regs += 2; in xe_oa_alloc_regs()
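
xe_oa_alloc_regs() above consumes the user-supplied configuration as a flat array of u32 pairs, address followed by value, advancing two u32s per register (regs += 2). A userspace-style sketch of parsing that layout, with placeholder addresses; real configurations must hit the whitelisted ranges checked above.

#include <assert.h>
#include <stdint.h>

struct oa_reg_pair {
	uint32_t addr;
	uint32_t value;
};

/* Userspace analogue of the xe_oa_alloc_regs() loop: consume n_regs
 * (address, value) u32 pairs from a flat array. */
static void parse_regs(struct oa_reg_pair *out, const uint32_t *regs, uint32_t n_regs)
{
	for (uint32_t i = 0; i < n_regs; i++) {
		out[i].addr = regs[0];
		out[i].value = regs[1];
		regs += 2;	/* same stride as the kernel loop */
	}
}

int main(void)
{
	/* Hypothetical two-register config; addresses are placeholders. */
	const uint32_t flat[] = { 0xd900, 0x0, 0xd920, 0x00100000 };
	struct oa_reg_pair parsed[2];

	parse_regs(parsed, flat, 2);
	assert(parsed[1].addr == 0xd920 && parsed[1].value == 0x00100000);
	return 0;
}
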
2243 return sysfs_emit(buf, "%d\n", oa_config->id); in show_dynamic_id()
2249 sysfs_attr_init(&oa_config->sysfs_metric_id.attr); in create_dynamic_oa_sysfs_entry()
2250 oa_config->sysfs_metric_id.attr.name = "id"; in create_dynamic_oa_sysfs_entry()
2251 oa_config->sysfs_metric_id.attr.mode = 0444; in create_dynamic_oa_sysfs_entry()
2252 oa_config->sysfs_metric_id.show = show_dynamic_id; in create_dynamic_oa_sysfs_entry()
2253 oa_config->sysfs_metric_id.store = NULL; in create_dynamic_oa_sysfs_entry()
2255 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr; in create_dynamic_oa_sysfs_entry()
2256 oa_config->attrs[1] = NULL; in create_dynamic_oa_sysfs_entry()
2258 oa_config->sysfs_metric.name = oa_config->uuid; in create_dynamic_oa_sysfs_entry()
2259 oa_config->sysfs_metric.attrs = oa_config->attrs; in create_dynamic_oa_sysfs_entry()
2261 return sysfs_create_group(oa->metrics_kobj, &oa_config->sysfs_metric); in create_dynamic_oa_sysfs_entry()
2265 * xe_oa_add_config_ioctl - Adds one OA config
2277 struct xe_oa *oa = &xe->oa; in xe_oa_add_config_ioctl()
2281 struct xe_oa_reg *regs; in xe_oa_add_config_ioctl() local
2284 if (!oa->xe) { in xe_oa_add_config_ioctl()
2285 drm_dbg(&xe->drm, "xe oa interface not available for this system\n"); in xe_oa_add_config_ioctl()
2286 return -ENODEV; in xe_oa_add_config_ioctl()
2290 drm_dbg(&oa->xe->drm, "Insufficient privileges to add xe OA config\n"); in xe_oa_add_config_ioctl()
2291 return -EACCES; in xe_oa_add_config_ioctl()
2295 if (XE_IOCTL_DBG(oa->xe, err)) in xe_oa_add_config_ioctl()
2296 return -EFAULT; in xe_oa_add_config_ioctl()
2298 if (XE_IOCTL_DBG(oa->xe, arg->extensions) || in xe_oa_add_config_ioctl()
2299 XE_IOCTL_DBG(oa->xe, !arg->regs_ptr) || in xe_oa_add_config_ioctl()
2300 XE_IOCTL_DBG(oa->xe, !arg->n_regs)) in xe_oa_add_config_ioctl()
2301 return -EINVAL; in xe_oa_add_config_ioctl()
2305 return -ENOMEM; in xe_oa_add_config_ioctl()
2307 oa_config->oa = oa; in xe_oa_add_config_ioctl()
2308 kref_init(&oa_config->ref); in xe_oa_add_config_ioctl()
2310 if (!uuid_is_valid(arg->uuid)) { in xe_oa_add_config_ioctl()
2311 drm_dbg(&oa->xe->drm, "Invalid uuid format for OA config\n"); in xe_oa_add_config_ioctl()
2312 err = -EINVAL; in xe_oa_add_config_ioctl()
2316 /* Last character in oa_config->uuid will be 0 because oa_config is kzalloc */ in xe_oa_add_config_ioctl()
2317 memcpy(oa_config->uuid, arg->uuid, sizeof(arg->uuid)); in xe_oa_add_config_ioctl()
2319 oa_config->regs_len = arg->n_regs; in xe_oa_add_config_ioctl()
2320 regs = xe_oa_alloc_regs(oa, xe_oa_is_valid_config_reg_addr, in xe_oa_add_config_ioctl()
2321 u64_to_user_ptr(arg->regs_ptr), in xe_oa_add_config_ioctl()
2322 arg->n_regs); in xe_oa_add_config_ioctl()
2323 if (IS_ERR(regs)) { in xe_oa_add_config_ioctl()
2324 drm_dbg(&oa->xe->drm, "Failed to create OA config for mux_regs\n"); in xe_oa_add_config_ioctl()
2325 err = PTR_ERR(regs); in xe_oa_add_config_ioctl()
2328 oa_config->regs = regs; in xe_oa_add_config_ioctl()
2330 err = mutex_lock_interruptible(&oa->metrics_lock); in xe_oa_add_config_ioctl()
2335 idr_for_each_entry(&oa->metrics_idr, tmp, id) { in xe_oa_add_config_ioctl()
2336 if (!strcmp(tmp->uuid, oa_config->uuid)) { in xe_oa_add_config_ioctl()
2337 drm_dbg(&oa->xe->drm, "OA config already exists with this uuid\n"); in xe_oa_add_config_ioctl()
2338 err = -EADDRINUSE; in xe_oa_add_config_ioctl()
2345 drm_dbg(&oa->xe->drm, "Failed to create sysfs entry for OA config\n"); in xe_oa_add_config_ioctl()
2349 oa_config->id = idr_alloc(&oa->metrics_idr, oa_config, 1, 0, GFP_KERNEL); in xe_oa_add_config_ioctl()
2350 if (oa_config->id < 0) { in xe_oa_add_config_ioctl()
2351 drm_dbg(&oa->xe->drm, "Failed to create sysfs entry for OA config\n"); in xe_oa_add_config_ioctl()
2352 err = oa_config->id; in xe_oa_add_config_ioctl()
2356 mutex_unlock(&oa->metrics_lock); in xe_oa_add_config_ioctl()
2358 drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, oa_config->id); in xe_oa_add_config_ioctl()
2360 return oa_config->id; in xe_oa_add_config_ioctl()
2363 mutex_unlock(&oa->metrics_lock); in xe_oa_add_config_ioctl()
2366 drm_dbg(&oa->xe->drm, "Failed to add new OA config\n"); in xe_oa_add_config_ioctl()
2371 * xe_oa_remove_config_ioctl - Removes one OA config
2379 struct xe_oa *oa = &xe->oa; in xe_oa_remove_config_ioctl()
2384 if (!oa->xe) { in xe_oa_remove_config_ioctl()
2385 drm_dbg(&xe->drm, "xe oa interface not available for this system\n"); in xe_oa_remove_config_ioctl()
2386 return -ENODEV; in xe_oa_remove_config_ioctl()
2390 drm_dbg(&oa->xe->drm, "Insufficient privileges to remove xe OA config\n"); in xe_oa_remove_config_ioctl()
2391 return -EACCES; in xe_oa_remove_config_ioctl()
2395 if (XE_IOCTL_DBG(oa->xe, ret)) in xe_oa_remove_config_ioctl()
2398 ret = mutex_lock_interruptible(&oa->metrics_lock); in xe_oa_remove_config_ioctl()
2402 oa_config = idr_find(&oa->metrics_idr, arg); in xe_oa_remove_config_ioctl()
2404 drm_dbg(&oa->xe->drm, "Failed to remove unknown OA config\n"); in xe_oa_remove_config_ioctl()
2405 ret = -ENOENT; in xe_oa_remove_config_ioctl()
2409 WARN_ON(arg != oa_config->id); in xe_oa_remove_config_ioctl()
2411 sysfs_remove_group(oa->metrics_kobj, &oa_config->sysfs_metric); in xe_oa_remove_config_ioctl()
2412 idr_remove(&oa->metrics_idr, arg); in xe_oa_remove_config_ioctl()
2414 mutex_unlock(&oa->metrics_lock); in xe_oa_remove_config_ioctl()
2416 drm_dbg(&oa->xe->drm, "Removed config %s id=%i\n", oa_config->uuid, oa_config->id); in xe_oa_remove_config_ioctl()
2423 mutex_unlock(&oa->metrics_lock); in xe_oa_remove_config_ioctl()
2428 * xe_oa_register - Xe OA registration
2435 struct xe_oa *oa = &xe->oa; in xe_oa_register()
2437 if (!oa->xe) in xe_oa_register()
2440 oa->metrics_kobj = kobject_create_and_add("metrics", in xe_oa_register()
2441 &xe->drm.primary->kdev->kobj); in xe_oa_register()
2445 * xe_oa_unregister - Xe OA de-registration
2450 struct xe_oa *oa = &xe->oa; in xe_oa_unregister()
2452 if (!oa->metrics_kobj) in xe_oa_unregister()
2455 kobject_put(oa->metrics_kobj); in xe_oa_unregister()
2456 oa->metrics_kobj = NULL; in xe_oa_unregister()
2466 if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) >= 1270) { in __hwe_oam_unit()
2471 xe_gt_WARN_ON(hwe->gt, hwe->gt->info.type != XE_GT_TYPE_MEDIA); in __hwe_oam_unit()
2481 switch (hwe->class) { in __hwe_oa_unit()
2528 int i, num_units = gt->oa.num_oa_units; in __xe_oa_init_oa_units()
2531 struct xe_oa_unit *u = &gt->oa.oa_unit[i]; in __xe_oa_init_oa_units()
2533 if (gt->info.type != XE_GT_TYPE_MEDIA) { in __xe_oa_init_oa_units()
2534 u->regs = __oag_regs(); in __xe_oa_init_oa_units()
2535 u->type = DRM_XE_OA_UNIT_TYPE_OAG; in __xe_oa_init_oa_units()
2537 u->regs = __oam_regs(mtl_oa_base[i]); in __xe_oa_init_oa_units()
2538 u->type = DRM_XE_OA_UNIT_TYPE_OAM; in __xe_oa_init_oa_units()
2541 xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0); in __xe_oa_init_oa_units()
2544 xe_mmio_write32(&gt->mmio, u->regs.oa_debug, in __xe_oa_init_oa_units()
2548 u->oa_unit_id = gt_to_xe(gt)->oa.oa_unit_ids++; in __xe_oa_init_oa_units()
2559 u = drmm_kcalloc(&gt_to_xe(gt)->drm, num_oa_units, sizeof(*u), GFP_KERNEL); in xe_oa_init_gt()
2561 return -ENOMEM; in xe_oa_init_gt()
2566 hwe->oa_unit = NULL; in xe_oa_init_gt()
2569 hwe->oa_unit = &u[index]; in xe_oa_init_gt()
2577 gt->oa.num_oa_units = num_oa_units; in xe_oa_init_gt()
2578 gt->oa.oa_unit = u; in xe_oa_init_gt()
2582 drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->oa.gt_lock); in xe_oa_init_gt()
2592 for_each_gt(gt, oa->xe, i) { in xe_oa_init_oa_units()
2603 __set_bit(format, oa->format_mask); in oa_format_add()
2608 if (GRAPHICS_VER(oa->xe) >= 20) { in xe_oa_init_supported_formats()
2621 } else if (GRAPHICS_VERx100(oa->xe) >= 1270) { in xe_oa_init_supported_formats()
2629 } else if (GRAPHICS_VERx100(oa->xe) >= 1255) { in xe_oa_init_supported_formats()
2637 xe_assert(oa->xe, GRAPHICS_VER(oa->xe) >= 12); in xe_oa_init_supported_formats()
2646 * xe_oa_init - OA initialization during device probe
2653 struct xe_oa *oa = &xe->oa; in xe_oa_init()
2663 oa->xe = xe; in xe_oa_init()
2664 oa->oa_formats = oa_formats; in xe_oa_init()
2666 drmm_mutex_init(&oa->xe->drm, &oa->metrics_lock); in xe_oa_init()
2667 idr_init_base(&oa->metrics_idr, 1); in xe_oa_init()
2671 drm_err(&xe->drm, "OA initialization failed (%pe)\n", ERR_PTR(ret)); in xe_oa_init()
2678 oa->xe = NULL; in xe_oa_init()
2689 * xe_oa_fini - OA de-initialization during device remove
2694 struct xe_oa *oa = &xe->oa; in xe_oa_fini()
2696 if (!oa->xe) in xe_oa_fini()
2699 idr_for_each(&oa->metrics_idr, destroy_config, oa); in xe_oa_fini()
2700 idr_destroy(&oa->metrics_idr); in xe_oa_fini()
2702 oa->xe = NULL; in xe_oa_fini()