Lines Matching +full:pm +full:- +full:alive
72 pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL"); in amdgpu_ctx_to_drm_sched_prio()
113 return -EACCES; in amdgpu_ctx_priority_permit()
141 struct amdgpu_device *adev = ctx->mgr->adev; in amdgpu_ctx_get_hw_prio()
145 ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? in amdgpu_ctx_get_hw_prio()
146 ctx->init_priority : ctx->override_priority; in amdgpu_ctx_get_hw_prio()
163 if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0) in amdgpu_ctx_get_hw_prio()
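The hits at lines 145-146 above (repeated at 220-221 and 846-847 in the same file) all compute an effective priority: the override value wins unless it is still the UNSET sentinel, in which case the priority chosen at context creation applies. A minimal userspace sketch of that fallback; the names below merely mirror the uapi constants and are not the driver's:

/* Sketch only: effective priority with an "unset" sentinel. */
#include <stdio.h>

enum {
	CTX_PRIORITY_UNSET  = -2048,	/* mirrors AMDGPU_CTX_PRIORITY_UNSET  */
	CTX_PRIORITY_NORMAL = 0,	/* mirrors AMDGPU_CTX_PRIORITY_NORMAL */
	CTX_PRIORITY_HIGH   = 512,	/* mirrors AMDGPU_CTX_PRIORITY_HIGH   */
};

static int effective_priority(int init_priority, int override_priority)
{
	/* The override only takes effect once it has been set to a real level. */
	return (override_priority == CTX_PRIORITY_UNSET) ? init_priority
							 : override_priority;
}

int main(void)
{
	printf("%d\n", effective_priority(CTX_PRIORITY_NORMAL, CTX_PRIORITY_UNSET)); /* 0   */
	printf("%d\n", effective_priority(CTX_PRIORITY_NORMAL, CTX_PRIORITY_HIGH));  /* 512 */
	return 0;
}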
179 if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags)) in amdgpu_ctx_fence_time()
183 if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags)) in amdgpu_ctx_fence_time()
184 return ktime_sub(ktime_get(), s_fence->scheduled.timestamp); in amdgpu_ctx_fence_time()
186 return ktime_sub(s_fence->finished.timestamp, in amdgpu_ctx_fence_time()
187 s_fence->scheduled.timestamp); in amdgpu_ctx_fence_time()
196 spin_lock(&ctx->ring_lock); in amdgpu_ctx_entity_time()
198 res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i])); in amdgpu_ctx_entity_time()
200 spin_unlock(&ctx->ring_lock); in amdgpu_ctx_entity_time()
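Lines 179-187 above compute how long one fence ran: nothing if it was never scheduled, "now minus scheduled" while it is still executing, and "finished minus scheduled" once it has completed; lines 196-200 then sum that over an entity's fence slots under ring_lock. A self-contained sketch of the same arithmetic with clock_gettime; the struct and flags below are invented stand-ins for struct drm_sched_fence and its timestamp bits:

/* Sketch only: fence runtime accounting, userspace analogue. */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct fake_fence {
	bool scheduled_set;		/* stands in for the "scheduled" timestamp bit */
	bool finished_set;		/* stands in for the "finished" timestamp bit  */
	struct timespec scheduled;
	struct timespec finished;
};

static int64_t ts_to_ns(const struct timespec *ts)
{
	return (int64_t)ts->tv_sec * 1000000000LL + ts->tv_nsec;
}

/* Nanoseconds this fence has been executing, or did execute. */
static int64_t fence_time_ns(const struct fake_fence *f)
{
	struct timespec now;

	if (!f->scheduled_set)
		return 0;		/* never started: contributes nothing */
	if (!f->finished_set) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		return ts_to_ns(&now) - ts_to_ns(&f->scheduled);	/* still running */
	}
	return ts_to_ns(&f->finished) - ts_to_ns(&f->scheduled);	/* completed */
}

The per-entity total is then this value summed over every occupied fence slot while holding the lock that protects the ring.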
208 struct amdgpu_device *adev = ctx->mgr->adev; in amdgpu_ctx_init_entity()
218 return -ENOMEM; in amdgpu_ctx_init_entity()
220 ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? in amdgpu_ctx_init_entity()
221 ctx->init_priority : ctx->override_priority; in amdgpu_ctx_init_entity()
222 entity->hw_ip = hw_ip; in amdgpu_ctx_init_entity()
223 entity->sequence = 1; in amdgpu_ctx_init_entity()
229 if (!(adev)->xcp_mgr) { in amdgpu_ctx_init_entity()
230 scheds = adev->gpu_sched[hw_ip][hw_prio].sched; in amdgpu_ctx_init_entity()
231 num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; in amdgpu_ctx_init_entity()
235 fpriv = container_of(ctx->ctx_mgr, struct amdgpu_fpriv, ctx_mgr); in amdgpu_ctx_init_entity()
252 r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds, in amdgpu_ctx_init_entity()
253 &ctx->guilty); in amdgpu_ctx_init_entity()
258 if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity)) in amdgpu_ctx_init_entity()
264 drm_sched_entity_fini(&entity->entity); in amdgpu_ctx_init_entity()
282 res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i])); in amdgpu_ctx_fini_entity()
283 dma_fence_put(entity->fences[i]); in amdgpu_ctx_fini_entity()
295 struct amdgpu_device *adev = ctx->mgr->adev; in amdgpu_ctx_get_stable_pstate()
323 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_ctx_init()
333 kref_init(&ctx->refcount); in amdgpu_ctx_init()
334 ctx->mgr = mgr; in amdgpu_ctx_init()
335 spin_lock_init(&ctx->ring_lock); in amdgpu_ctx_init()
337 ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter); in amdgpu_ctx_init()
338 ctx->reset_counter_query = ctx->reset_counter; in amdgpu_ctx_init()
339 ctx->generation = amdgpu_vm_generation(mgr->adev, &fpriv->vm); in amdgpu_ctx_init()
340 ctx->init_priority = priority; in amdgpu_ctx_init()
341 ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET; in amdgpu_ctx_init()
347 if (mgr->adev->pm.stable_pstate_ctx) in amdgpu_ctx_init()
348 ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate; in amdgpu_ctx_init()
350 ctx->stable_pstate = current_stable_pstate; in amdgpu_ctx_init()
352 ctx->ctx_mgr = &(fpriv->ctx_mgr); in amdgpu_ctx_init()
359 struct amdgpu_device *adev = ctx->mgr->adev; in amdgpu_ctx_set_stable_pstate()
364 mutex_lock(&adev->pm.stable_pstate_ctx_lock); in amdgpu_ctx_set_stable_pstate()
365 if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) { in amdgpu_ctx_set_stable_pstate()
366 r = -EBUSY; in amdgpu_ctx_set_stable_pstate()
391 r = -EINVAL; in amdgpu_ctx_set_stable_pstate()
398 adev->pm.stable_pstate_ctx = NULL; in amdgpu_ctx_set_stable_pstate()
400 adev->pm.stable_pstate_ctx = ctx; in amdgpu_ctx_set_stable_pstate()
402 mutex_unlock(&adev->pm.stable_pstate_ctx_lock); in amdgpu_ctx_set_stable_pstate()
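Lines 364-402 above enforce the ownership rule for the stable-pstate request: at most one context may hold it at a time (otherwise -EBUSY), selecting the "none" pstate drops ownership, and anything else records the caller as the owner, all under pm.stable_pstate_ctx_lock. A hedged pthread sketch of that single-owner pattern; every name below is a placeholder, not the driver's:

/* Sketch only: "one owner at a time or -EBUSY", userspace analogue. */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>

enum { PSTATE_NONE, PSTATE_STANDARD };		/* invented values */

static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;
static void *stable_owner;			/* NULL: nobody holds the request */

static int set_stable_pstate(void *ctx, unsigned int pstate)
{
	int r = 0;

	pthread_mutex_lock(&owner_lock);
	if (stable_owner && stable_owner != ctx)
		r = -EBUSY;			/* another context already owns it   */
	else if (pstate == PSTATE_NONE)
		stable_owner = NULL;		/* back to automatic: drop ownership */
	else
		stable_owner = ctx;		/* record this context as the owner  */
	pthread_mutex_unlock(&owner_lock);
	return r;
}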
410 struct amdgpu_ctx_mgr *mgr = ctx->mgr; in amdgpu_ctx_fini()
411 struct amdgpu_device *adev = mgr->adev; in amdgpu_ctx_fini()
421 spend = amdgpu_ctx_fini_entity(adev, ctx->entities[i][j]); in amdgpu_ctx_fini()
422 atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]); in amdgpu_ctx_fini()
427 amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate); in amdgpu_ctx_fini()
442 return -EINVAL; in amdgpu_ctx_get_entity()
445 /* Right now all IPs have only one instance - multiple rings. */ in amdgpu_ctx_get_entity()
448 return -EINVAL; in amdgpu_ctx_get_entity()
453 return -EINVAL; in amdgpu_ctx_get_entity()
456 if (ctx->entities[hw_ip][ring] == NULL) { in amdgpu_ctx_get_entity()
462 ctx_entity = &ctx->entities[hw_ip][ring]->entity; in amdgpu_ctx_get_entity()
479 struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; in amdgpu_ctx_alloc()
485 return -ENOMEM; in amdgpu_ctx_alloc()
487 mutex_lock(&mgr->lock); in amdgpu_ctx_alloc()
488 r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL); in amdgpu_ctx_alloc()
490 mutex_unlock(&mgr->lock); in amdgpu_ctx_alloc()
498 idr_remove(&mgr->ctx_handles, *id); in amdgpu_ctx_alloc()
502 mutex_unlock(&mgr->lock); in amdgpu_ctx_alloc()
514 if (!ctx->entities[i][j]) in amdgpu_ctx_do_release()
517 drm_sched_entity_destroy(&ctx->entities[i][j]->entity); in amdgpu_ctx_do_release()
526 struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; in amdgpu_ctx_free()
529 mutex_lock(&mgr->lock); in amdgpu_ctx_free()
530 ctx = idr_remove(&mgr->ctx_handles, id); in amdgpu_ctx_free()
532 kref_put(&ctx->refcount, amdgpu_ctx_do_release); in amdgpu_ctx_free()
533 mutex_unlock(&mgr->lock); in amdgpu_ctx_free()
534 return ctx ? 0 : -EINVAL; in amdgpu_ctx_free()
546 return -EINVAL; in amdgpu_ctx_query()
548 mgr = &fpriv->ctx_mgr; in amdgpu_ctx_query()
549 mutex_lock(&mgr->lock); in amdgpu_ctx_query()
550 ctx = idr_find(&mgr->ctx_handles, id); in amdgpu_ctx_query()
552 mutex_unlock(&mgr->lock); in amdgpu_ctx_query()
553 return -EINVAL; in amdgpu_ctx_query()
557 out->state.flags = 0x0; in amdgpu_ctx_query()
558 out->state.hangs = 0x0; in amdgpu_ctx_query()
561 reset_counter = atomic_read(&adev->gpu_reset_counter); in amdgpu_ctx_query()
563 if (ctx->reset_counter_query == reset_counter) in amdgpu_ctx_query()
564 out->state.reset_status = AMDGPU_CTX_NO_RESET; in amdgpu_ctx_query()
566 out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET; in amdgpu_ctx_query()
567 ctx->reset_counter_query = reset_counter; in amdgpu_ctx_query()
569 mutex_unlock(&mgr->lock); in amdgpu_ctx_query()
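Lines 561-567 above are a consume-on-read reset report: the context keeps its own reset_counter_query cursor, compares it with the device-wide gpu_reset_counter, reports UNKNOWN_RESET when they differ, and then advances the cursor so the next query reports NO_RESET again. A standalone sketch of that pattern (all names invented):

/* Sketch only: report a reset at most once per query cursor. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint gpu_resets;			/* bumped by a (hypothetical) reset path */

struct fake_ctx {
	unsigned int reset_query_cursor;	/* last counter value this context saw */
};

/* Returns true once after one or more resets, then re-arms. */
static bool ctx_query_reset(struct fake_ctx *ctx)
{
	unsigned int now = atomic_load(&gpu_resets);

	if (ctx->reset_query_cursor == now)
		return false;			/* AMDGPU_CTX_NO_RESET in the real ioctl */

	ctx->reset_query_cursor = now;		/* consume it: next query is clean again */
	return true;				/* AMDGPU_CTX_UNKNOWN_RESET */
}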
584 return -EINVAL; in amdgpu_ctx_query2()
586 mgr = &fpriv->ctx_mgr; in amdgpu_ctx_query2()
587 mutex_lock(&mgr->lock); in amdgpu_ctx_query2()
588 ctx = idr_find(&mgr->ctx_handles, id); in amdgpu_ctx_query2()
590 mutex_unlock(&mgr->lock); in amdgpu_ctx_query2()
591 return -EINVAL; in amdgpu_ctx_query2()
594 out->state.flags = 0x0; in amdgpu_ctx_query2()
595 out->state.hangs = 0x0; in amdgpu_ctx_query2()
597 if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter)) in amdgpu_ctx_query2()
598 out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET; in amdgpu_ctx_query2()
600 if (ctx->generation != amdgpu_vm_generation(adev, &fpriv->vm)) in amdgpu_ctx_query2()
601 out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST; in amdgpu_ctx_query2()
603 if (atomic_read(&ctx->guilty)) in amdgpu_ctx_query2()
604 out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY; in amdgpu_ctx_query2()
607 out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS; in amdgpu_ctx_query2()
609 if (adev->ras_enabled && con) { in amdgpu_ctx_query2()
616 ce_count = atomic_read(&con->ras_ce_count); in amdgpu_ctx_query2()
617 ue_count = atomic_read(&con->ras_ue_count); in amdgpu_ctx_query2()
619 if (ce_count != ctx->ras_counter_ce) { in amdgpu_ctx_query2()
620 ctx->ras_counter_ce = ce_count; in amdgpu_ctx_query2()
621 out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE; in amdgpu_ctx_query2()
624 if (ue_count != ctx->ras_counter_ue) { in amdgpu_ctx_query2()
625 ctx->ras_counter_ue = ue_count; in amdgpu_ctx_query2()
626 out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE; in amdgpu_ctx_query2()
629 schedule_delayed_work(&con->ras_counte_delay_work, in amdgpu_ctx_query2()
633 mutex_unlock(&mgr->lock); in amdgpu_ctx_query2()
646 return -EINVAL; in amdgpu_ctx_stable_pstate()
648 mgr = &fpriv->ctx_mgr; in amdgpu_ctx_stable_pstate()
649 mutex_lock(&mgr->lock); in amdgpu_ctx_stable_pstate()
650 ctx = idr_find(&mgr->ctx_handles, id); in amdgpu_ctx_stable_pstate()
652 mutex_unlock(&mgr->lock); in amdgpu_ctx_stable_pstate()
653 return -EINVAL; in amdgpu_ctx_stable_pstate()
661 mutex_unlock(&mgr->lock); in amdgpu_ctx_stable_pstate()
674 struct amdgpu_fpriv *fpriv = filp->driver_priv; in amdgpu_ctx_ioctl()
676 id = args->in.ctx_id; in amdgpu_ctx_ioctl()
677 priority = args->in.priority; in amdgpu_ctx_ioctl()
686 switch (args->in.op) { in amdgpu_ctx_ioctl()
688 if (args->in.flags) in amdgpu_ctx_ioctl()
689 return -EINVAL; in amdgpu_ctx_ioctl()
691 args->out.alloc.ctx_id = id; in amdgpu_ctx_ioctl()
694 if (args->in.flags) in amdgpu_ctx_ioctl()
695 return -EINVAL; in amdgpu_ctx_ioctl()
699 if (args->in.flags) in amdgpu_ctx_ioctl()
700 return -EINVAL; in amdgpu_ctx_ioctl()
701 r = amdgpu_ctx_query(adev, fpriv, id, &args->out); in amdgpu_ctx_ioctl()
704 if (args->in.flags) in amdgpu_ctx_ioctl()
705 return -EINVAL; in amdgpu_ctx_ioctl()
706 r = amdgpu_ctx_query2(adev, fpriv, id, &args->out); in amdgpu_ctx_ioctl()
709 if (args->in.flags) in amdgpu_ctx_ioctl()
710 return -EINVAL; in amdgpu_ctx_ioctl()
713 args->out.pstate.flags = stable_pstate; in amdgpu_ctx_ioctl()
716 if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK) in amdgpu_ctx_ioctl()
717 return -EINVAL; in amdgpu_ctx_ioctl()
718 stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK; in amdgpu_ctx_ioctl()
720 return -EINVAL; in amdgpu_ctx_ioctl()
724 return -EINVAL; in amdgpu_ctx_ioctl()
738 mgr = &fpriv->ctx_mgr; in amdgpu_ctx_get()
740 mutex_lock(&mgr->lock); in amdgpu_ctx_get()
741 ctx = idr_find(&mgr->ctx_handles, id); in amdgpu_ctx_get()
743 kref_get(&ctx->refcount); in amdgpu_ctx_get()
744 mutex_unlock(&mgr->lock); in amdgpu_ctx_get()
751 return -EINVAL; in amdgpu_ctx_put()
753 kref_put(&ctx->refcount, amdgpu_ctx_do_release); in amdgpu_ctx_put()
762 uint64_t seq = centity->sequence; in amdgpu_ctx_add_fence()
766 idx = seq & (amdgpu_sched_jobs - 1); in amdgpu_ctx_add_fence()
767 other = centity->fences[idx]; in amdgpu_ctx_add_fence()
772 spin_lock(&ctx->ring_lock); in amdgpu_ctx_add_fence()
773 centity->fences[idx] = fence; in amdgpu_ctx_add_fence()
774 centity->sequence++; in amdgpu_ctx_add_fence()
775 spin_unlock(&ctx->ring_lock); in amdgpu_ctx_add_fence()
778 &ctx->mgr->time_spend[centity->hw_ip]); in amdgpu_ctx_add_fence()
791 spin_lock(&ctx->ring_lock); in amdgpu_ctx_get_fence()
794 seq = centity->sequence - 1; in amdgpu_ctx_get_fence()
796 if (seq >= centity->sequence) { in amdgpu_ctx_get_fence()
797 spin_unlock(&ctx->ring_lock); in amdgpu_ctx_get_fence()
798 return ERR_PTR(-EINVAL); in amdgpu_ctx_get_fence()
802 if (seq + amdgpu_sched_jobs < centity->sequence) { in amdgpu_ctx_get_fence()
803 spin_unlock(&ctx->ring_lock); in amdgpu_ctx_get_fence()
807 fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]); in amdgpu_ctx_get_fence()
808 spin_unlock(&ctx->ring_lock); in amdgpu_ctx_get_fence()
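Lines 762-778 and 791-808 above show the per-entity fence ring: the slot is seq & (amdgpu_sched_jobs - 1), which relies on amdgpu_sched_jobs being a power of two; adding a fence overwrites the oldest slot and bumps the sequence, while a lookup fails with -EINVAL for a sequence that has not been issued yet and returns NULL once the slot has been recycled. A compact sketch of that windowed ring, with the spinlock elided and plain pointers standing in for dma_fence; RING_SIZE and the other names are invented:

/* Sketch only: power-of-two fence ring indexed by submission sequence. */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define RING_SIZE 32				/* must stay a power of two for the mask */

struct fence_ring {
	uint64_t sequence;			/* next sequence number to hand out */
	void *slots[RING_SIZE];
};

static uint64_t ring_add(struct fence_ring *r, void *fence)
{
	uint64_t seq = r->sequence;

	r->slots[seq & (RING_SIZE - 1)] = fence;	/* overwrites the oldest entry */
	r->sequence++;
	return seq;
}

/* NULL with *err == -EINVAL: not submitted yet; plain NULL: already recycled. */
static void *ring_get(struct fence_ring *r, uint64_t seq, int *err)
{
	*err = 0;
	if (seq >= r->sequence) {
		*err = -EINVAL;			/* asking for a future submission */
		return NULL;
	}
	if (seq + RING_SIZE < r->sequence)
		return NULL;			/* fell out of the window long ago */
	return r->slots[seq & (RING_SIZE - 1)];
}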
818 struct amdgpu_device *adev = ctx->mgr->adev; in amdgpu_ctx_set_entity_priority()
824 drm_sched_entity_set_priority(&aentity->entity, in amdgpu_ctx_set_entity_priority()
831 scheds = adev->gpu_sched[hw_ip][hw_prio].sched; in amdgpu_ctx_set_entity_priority()
832 num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; in amdgpu_ctx_set_entity_priority()
833 drm_sched_entity_modify_sched(&aentity->entity, scheds, in amdgpu_ctx_set_entity_priority()
844 ctx->override_priority = priority; in amdgpu_ctx_priority_override()
846 ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? in amdgpu_ctx_priority_override()
847 ctx->init_priority : ctx->override_priority; in amdgpu_ctx_priority_override()
850 if (!ctx->entities[i][j]) in amdgpu_ctx_priority_override()
853 amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j], in amdgpu_ctx_priority_override()
867 spin_lock(&ctx->ring_lock); in amdgpu_ctx_wait_prev_fence()
868 idx = centity->sequence & (amdgpu_sched_jobs - 1); in amdgpu_ctx_wait_prev_fence()
869 other = dma_fence_get(centity->fences[idx]); in amdgpu_ctx_wait_prev_fence()
870 spin_unlock(&ctx->ring_lock); in amdgpu_ctx_wait_prev_fence()
876 if (r < 0 && r != -ERESTARTSYS) in amdgpu_ctx_wait_prev_fence()
888 mgr->adev = adev; in amdgpu_ctx_mgr_init()
889 mutex_init(&mgr->lock); in amdgpu_ctx_mgr_init()
890 idr_init_base(&mgr->ctx_handles, 1); in amdgpu_ctx_mgr_init()
893 atomic64_set(&mgr->time_spend[i], 0); in amdgpu_ctx_mgr_init()
902 idp = &mgr->ctx_handles; in amdgpu_ctx_mgr_entity_flush()
904 mutex_lock(&mgr->lock); in amdgpu_ctx_mgr_entity_flush()
910 if (!ctx->entities[i][j]) in amdgpu_ctx_mgr_entity_flush()
913 entity = &ctx->entities[i][j]->entity; in amdgpu_ctx_mgr_entity_flush()
918 mutex_unlock(&mgr->lock); in amdgpu_ctx_mgr_entity_flush()
928 idp = &mgr->ctx_handles; in amdgpu_ctx_mgr_entity_fini()
931 if (kref_read(&ctx->refcount) != 1) { in amdgpu_ctx_mgr_entity_fini()
932 DRM_ERROR("ctx %p is still alive\n", ctx); in amdgpu_ctx_mgr_entity_fini()
940 if (!ctx->entities[i][j]) in amdgpu_ctx_mgr_entity_fini()
943 entity = &ctx->entities[i][j]->entity; in amdgpu_ctx_mgr_entity_fini()
958 idp = &mgr->ctx_handles; in amdgpu_ctx_mgr_fini()
961 if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1) in amdgpu_ctx_mgr_fini()
962 DRM_ERROR("ctx %p is still alive\n", ctx); in amdgpu_ctx_mgr_fini()
965 idr_destroy(&mgr->ctx_handles); in amdgpu_ctx_mgr_fini()
966 mutex_destroy(&mgr->lock); in amdgpu_ctx_mgr_fini()
981 mutex_lock(&mgr->lock); in amdgpu_ctx_mgr_usage()
983 uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]); in amdgpu_ctx_mgr_usage()
988 idr_for_each_entry(&mgr->ctx_handles, ctx, id) { in amdgpu_ctx_mgr_usage()
994 centity = ctx->entities[hw_ip][i]; in amdgpu_ctx_mgr_usage()
1002 mutex_unlock(&mgr->lock); in amdgpu_ctx_mgr_usage()
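Lines 981-1002 above aggregate per-IP busy time for usage reporting: a manager-wide accumulator holds the time of fences that already retired out of the rings, and the entities of every still-open context add their in-flight fence time on top, all while holding the manager lock. A rough sketch of that aggregation (locking elided, every name a placeholder):

/* Sketch only: usage = retired time (atomic accumulator) + in-flight time. */
#include <stdatomic.h>
#include <stdint.h>

#define NUM_IPS		4
#define ENTITIES_PER_IP	2

struct fake_entity { int64_t inflight_ns; };	/* summed fence times of live work */
struct fake_ctx { struct fake_entity *entities[NUM_IPS][ENTITIES_PER_IP]; };

static _Atomic int64_t time_spend[NUM_IPS];	/* filled in as fences retire */

static void mgr_usage(struct fake_ctx **ctxs, int nctx, int64_t usage[NUM_IPS])
{
	for (int ip = 0; ip < NUM_IPS; ip++)
		usage[ip] = atomic_load(&time_spend[ip]);	/* already-retired work */

	for (int c = 0; c < nctx; c++)
		for (int ip = 0; ip < NUM_IPS; ip++)
			for (int e = 0; e < ENTITIES_PER_IP; e++)
				if (ctxs[c]->entities[ip][e])
					usage[ip] += ctxs[c]->entities[ip][e]->inflight_ns;
}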