Lines matching "zap-shader" in drivers/gpu/drm/msm/adreno/adreno_gpu.c

1 // SPDX-License-Identifier: GPL-2.0-only
17 #include <linux/nvmem-consumer.h>
33 struct device *dev = &gpu->pdev->dev; in zap_shader_load_mdt()
45 return -EINVAL; in zap_shader_load_mdt()
48 np = of_get_child_by_name(dev->of_node, "zap-shader"); in zap_shader_load_mdt()
51 return -ENODEV; in zap_shader_load_mdt()
54 mem_np = of_parse_phandle(np, "memory-region", 0); in zap_shader_load_mdt()
58 return -EINVAL; in zap_shader_load_mdt()
69 * Check for a firmware-name property. This is the new scheme in zap_shader_load_mdt()
71 * keys, allowing us to have a different zap fw path for different in zap_shader_load_mdt()
74 * If the firmware-name property is found, we bypass the in zap_shader_load_mdt()
78 * If the firmware-name property is not found, for backwards in zap_shader_load_mdt()
82 of_property_read_string_index(np, "firmware-name", 0, &signed_fwname); in zap_shader_load_mdt()
85 ret = request_firmware_direct(&fw, fwname, gpu->dev->dev); in zap_shader_load_mdt()
93 * For new targets, we require the firmware-name property, in zap_shader_load_mdt()
94 * if a zap-shader is required, rather than falling back in zap_shader_load_mdt()
102 return -ENOENT; in zap_shader_load_mdt()
120 ret = -E2BIG; in zap_shader_load_mdt()
127 ret = -ENOMEM; in zap_shader_load_mdt()
135 * with upstream linux-firmware it would be in a qcom/ subdir.. in zap_shader_load_mdt()
140 if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) { in zap_shader_load_mdt()
159 * If the scm call returns -EOPNOTSUPP we assume that this target in zap_shader_load_mdt()
160 * doesn't need/support the zap shader so quietly fail in zap_shader_load_mdt()
162 if (ret == -EOPNOTSUPP) in zap_shader_load_mdt()
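
Taken together, the comments above describe the firmware selection policy: if the zap-shader DT node carries a firmware-name property, that per-SoC (signed) name is used as-is; older targets without it fall back to the zapfw name from the GPU info table; new targets without either simply get -ENOENT. A condensed sketch of that policy, assuming fwname is the legacy name passed in by the caller:

	const char *signed_fwname = NULL;
	const struct firmware *fw;
	int ret;

	/* Prefer an explicit, SoC-specific (signed) name from the DT node */
	of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);

	if (signed_fwname) {
		fwname = signed_fwname;
		ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
		if (ret)
			fw = ERR_PTR(ret);
	} else if (fwname) {
		/* Older targets: fall back to the name from the GPU info table */
		fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
	} else {
		/* New targets must set firmware-name if they need a zap shader */
		fw = ERR_PTR(-ENOENT);
	}
	if (IS_ERR(fw))
		return PTR_ERR(fw);

After the image is loaded and handed to the secure world, an -EOPNOTSUPP result from the SCM authorization call is taken to mean the target has no zap shader at all and is quietly treated as success rather than an error.
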
179 struct platform_device *pdev = gpu->pdev; in adreno_zap_shader_load()
181 /* Short cut if we determine the zap shader isn't available/needed */ in adreno_zap_shader_load()
183 return -ENODEV; in adreno_zap_shader_load()
187 DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n"); in adreno_zap_shader_load()
188 return -EPROBE_DEFER; in adreno_zap_shader_load()
191 return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); in adreno_zap_shader_load()
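
The two early returns above are meaningful to the per-GPU callers: -ENODEV means "no zap shader for this target, carry on without it", while -EPROBE_DEFER is propagated until the SCM driver has probed. A hedged caller-side sketch (the GPU_PAS_ID constant and the exact fallback a concrete GPU takes are assumptions, not the visible code):

	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
	if (ret == -ENODEV) {
		/* No zap shader on this target: run without the secure init step */
		dev_warn_once(gpu->dev->dev,
			      "zap shader not available, skipping secure init\n");
	} else if (ret) {
		/* Includes -EPROBE_DEFER while the SCM driver is still missing */
		return ret;
	}
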
211 mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks); in adreno_iommu_create_address_space()
224 start = max_t(u64, SZ_16M, geometry->aperture_start); in adreno_iommu_create_address_space()
225 size = geometry->aperture_end - start + 1; in adreno_iommu_create_address_space()
231 mmu->funcs->destroy(mmu); in adreno_iommu_create_address_space()
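
The two arithmetic lines above carve the GPU virtual address range out of the IOMMU aperture, never starting below 16 MB. A small worked example, assuming a plain 32-bit aperture:

	/*
	 * aperture_start = 0x00000000, aperture_end = 0xffffffff:
	 *   start = max(SZ_16M, 0x00000000)      = 0x01000000
	 *   size  = 0xffffffff - 0x01000000 + 1  = 0xff000000  (4 GiB - 16 MiB)
	 */
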
243 if (adreno_gpu->info->address_space_size) in adreno_private_address_space_size()
244 return adreno_gpu->info->address_space_size; in adreno_private_address_space_size()
258 bool do_devcoredump = info && !READ_ONCE(gpu->crashstate); in adreno_fault_handler()
265 gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); in adreno_fault_handler()
270 * adreno-smmu-priv in adreno_fault_handler()
280 if (info->fsr & ARM_SMMU_FSR_TF) in adreno_fault_handler()
282 else if (info->fsr & ARM_SMMU_FSR_PF) in adreno_fault_handler()
284 else if (info->fsr & ARM_SMMU_FSR_EF) in adreno_fault_handler()
288 info->ttbr0, iova, in adreno_fault_handler()
295 del_timer(&gpu->hangcheck_timer); in adreno_fault_handler()
297 gpu->fault_info.ttbr0 = info->ttbr0; in adreno_fault_handler()
298 gpu->fault_info.iova = iova; in adreno_fault_handler()
299 gpu->fault_info.flags = flags; in adreno_fault_handler()
300 gpu->fault_info.type = type; in adreno_fault_handler()
301 gpu->fault_info.block = block; in adreno_fault_handler()
303 kthread_queue_work(gpu->worker, &gpu->fault_work); in adreno_fault_handler()
313 struct drm_device *drm = gpu->dev; in adreno_get_param()
321 *value = adreno_gpu->info->revn; in adreno_get_param()
324 *value = adreno_gpu->info->gmem; in adreno_get_param()
334 *value = adreno_gpu->chip_id; in adreno_get_param()
335 if (!adreno_gpu->info->revn) in adreno_get_param()
336 *value |= ((uint64_t) adreno_gpu->speedbin) << 32; in adreno_get_param()
339 *value = adreno_gpu->base.fast_rate; in adreno_get_param()
342 if (adreno_gpu->funcs->get_timestamp) { in adreno_get_param()
345 pm_runtime_get_sync(&gpu->pdev->dev); in adreno_get_param()
346 ret = adreno_gpu->funcs->get_timestamp(gpu, value); in adreno_get_param()
347 pm_runtime_put_autosuspend(&gpu->pdev->dev); in adreno_get_param()
351 return -EINVAL; in adreno_get_param()
353 *value = gpu->nr_rings * NR_SCHED_PRIORITIES; in adreno_get_param()
359 if (ctx->aspace) in adreno_get_param()
360 *value = gpu->global_faults + ctx->aspace->faults; in adreno_get_param()
362 *value = gpu->global_faults; in adreno_get_param()
365 *value = gpu->suspend_count; in adreno_get_param()
368 if (ctx->aspace == gpu->aspace) in adreno_get_param()
369 return UERR(EINVAL, drm, "requires per-process pgtables"); in adreno_get_param()
370 *value = ctx->aspace->va_start; in adreno_get_param()
373 if (ctx->aspace == gpu->aspace) in adreno_get_param()
374 return UERR(EINVAL, drm, "requires per-process pgtables"); in adreno_get_param()
375 *value = ctx->aspace->va_size; in adreno_get_param()
378 *value = adreno_gpu->ubwc_config.highest_bank_bit; in adreno_get_param()
381 *value = adreno_gpu->has_ray_tracing; in adreno_get_param()
384 *value = adreno_gpu->ubwc_config.ubwc_swizzle; in adreno_get_param()
387 *value = adreno_gpu->ubwc_config.macrotile_mode; in adreno_get_param()
390 *value = adreno_gpu->uche_trap_base; in adreno_get_param()
393 return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param); in adreno_get_param()
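
One detail worth calling out from the parameter code above: when a GPU has no legacy revn number, the 64-bit CHIP_ID value carries the packed chip id in the low 32 bits and the speed bin in the high 32 bits. A small userspace-side decode sketch (the printf layout mirrors the driver's ADRENO_CHIPID_ARGS formatting; the function and byte-field naming are illustrative assumptions):

	#include <stdint.h>
	#include <stdio.h>

	/* Decode the 64-bit value returned for MSM_PARAM_CHIP_ID. */
	static void print_chip_id(uint64_t value)
	{
		uint32_t chip_id  = (uint32_t)value;         /* core.major.minor.patchid, one byte each */
		uint32_t speedbin = (uint32_t)(value >> 32); /* only set when there is no legacy revn */

		printf("adreno %u.%u.%u.%u, speedbin %u\n",
		       (chip_id >> 24) & 0xff, (chip_id >> 16) & 0xff,
		       (chip_id >> 8) & 0xff, chip_id & 0xff, speedbin);
	}
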
400 struct drm_device *drm = gpu->dev; in adreno_set_param()
425 mutex_lock(&gpu->lock); in adreno_set_param()
428 paramp = &ctx->comm; in adreno_set_param()
430 paramp = &ctx->cmdline; in adreno_set_param()
436 mutex_unlock(&gpu->lock); in adreno_set_param()
445 return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param); in adreno_set_param()
452 struct drm_device *drm = adreno_gpu->base.dev; in adreno_request_fw()
459 return ERR_PTR(-ENOMEM); in adreno_request_fw()
465 if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || in adreno_request_fw()
466 (adreno_gpu->fwloc == FW_LOCATION_NEW)) { in adreno_request_fw()
468 ret = request_firmware_direct(&fw, newname, drm->dev); in adreno_request_fw()
470 DRM_DEV_INFO(drm->dev, "loaded %s from new location\n", in adreno_request_fw()
472 adreno_gpu->fwloc = FW_LOCATION_NEW; in adreno_request_fw()
474 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { in adreno_request_fw()
475 DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", in adreno_request_fw()
485 if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || in adreno_request_fw()
486 (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) { in adreno_request_fw()
488 ret = request_firmware_direct(&fw, fwname, drm->dev); in adreno_request_fw()
490 DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n", in adreno_request_fw()
492 adreno_gpu->fwloc = FW_LOCATION_LEGACY; in adreno_request_fw()
494 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { in adreno_request_fw()
495 DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", in adreno_request_fw()
506 if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || in adreno_request_fw()
507 (adreno_gpu->fwloc == FW_LOCATION_HELPER)) { in adreno_request_fw()
509 ret = request_firmware(&fw, newname, drm->dev); in adreno_request_fw()
511 DRM_DEV_INFO(drm->dev, "loaded %s with helper\n", in adreno_request_fw()
513 adreno_gpu->fwloc = FW_LOCATION_HELPER; in adreno_request_fw()
515 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { in adreno_request_fw()
516 DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", in adreno_request_fw()
523 DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname); in adreno_request_fw()
524 fw = ERR_PTR(-ENOENT); in adreno_request_fw()
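
Condensing the three branches above: the firmware is looked for in up to three places, and the first location that works is remembered in adreno_gpu->fwloc so every later request goes straight to it. A summary of the order (the "qcom/" prefix for the new layout is taken from the earlier comment about upstream linux-firmware):

	/*
	 * 1. request_firmware_direct("qcom/<fwname>")  - new linux-firmware layout
	 * 2. request_firmware_direct("<fwname>")       - legacy flat layout
	 * 3. request_firmware("qcom/<fwname>")         - final attempt, allowing
	 *                                                the usermode-helper fallback
	 */
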
534 for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) { in adreno_load_fw()
537 if (!adreno_gpu->info->fw[i]) in adreno_load_fw()
545 if (adreno_gpu->fw[i]) in adreno_load_fw()
548 fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]); in adreno_load_fw()
552 adreno_gpu->fw[i] = fw; in adreno_load_fw()
564 ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4, in adreno_fw_create_bo()
565 MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); in adreno_fw_create_bo()
570 memcpy(ptr, &fw->data[4], fw->size - 4); in adreno_fw_create_bo()
582 VERB("%s", gpu->name); in adreno_hw_init()
584 if (adreno_gpu->info->family >= ADRENO_6XX_GEN1 && in adreno_hw_init()
589 DRM_DEV_ERROR(gpu->dev->dev, "unable to set SMMU aperture: %d\n", ret); in adreno_hw_init()
592 for (int i = 0; i < gpu->nr_rings; i++) { in adreno_hw_init()
593 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_hw_init()
598 ring->cur = ring->start; in adreno_hw_init()
599 ring->next = ring->start; in adreno_hw_init()
600 ring->memptrs->rptr = 0; in adreno_hw_init()
601 ring->memptrs->bv_fence = ring->fctx->completed_fence; in adreno_hw_init()
607 if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) { in adreno_hw_init()
608 ring->memptrs->fence = ring->fctx->last_fence; in adreno_hw_init()
619 struct msm_gpu *gpu = &adreno_gpu->base; in get_rptr()
621 return gpu->funcs->get_rptr(gpu, ring); in get_rptr()
626 return gpu->rb[0]; in adreno_active_ring()
631 struct drm_device *dev = gpu->dev; in adreno_recover()
634 // XXX pm-runtime?? we *need* the device to be off after this in adreno_recover()
635 // so maybe continuing to call ->pm_suspend/resume() is better? in adreno_recover()
637 gpu->funcs->pm_suspend(gpu); in adreno_recover()
638 gpu->funcs->pm_resume(gpu); in adreno_recover()
642 DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret); in adreno_recover()
652 ring->cur = ring->next; in adreno_flush()
657 * the ringbuffer and rb->next hasn't wrapped to zero yet in adreno_flush()
678 gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr); in adreno_idle()
688 WARN_ON(!mutex_is_locked(&gpu->lock)); in adreno_gpu_state_get()
690 kref_init(&state->ref); in adreno_gpu_state_get()
692 ktime_get_real_ts64(&state->time); in adreno_gpu_state_get()
694 for (i = 0; i < gpu->nr_rings; i++) { in adreno_gpu_state_get()
697 state->ring[i].fence = gpu->rb[i]->memptrs->fence; in adreno_gpu_state_get()
698 state->ring[i].iova = gpu->rb[i]->iova; in adreno_gpu_state_get()
699 state->ring[i].seqno = gpu->rb[i]->fctx->last_fence; in adreno_gpu_state_get()
700 state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]); in adreno_gpu_state_get()
701 state->ring[i].wptr = get_wptr(gpu->rb[i]); in adreno_gpu_state_get()
704 size = state->ring[i].wptr; in adreno_gpu_state_get()
707 for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++) in adreno_gpu_state_get()
708 if (gpu->rb[i]->start[j]) in adreno_gpu_state_get()
712 state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL); in adreno_gpu_state_get()
713 if (state->ring[i].data) in adreno_gpu_state_get()
714 state->ring[i].data_size = size << 2; in adreno_gpu_state_get()
719 if (!adreno_gpu->registers) in adreno_gpu_state_get()
723 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) in adreno_gpu_state_get()
724 count += adreno_gpu->registers[i + 1] - in adreno_gpu_state_get()
725 adreno_gpu->registers[i] + 1; in adreno_gpu_state_get()
727 state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL); in adreno_gpu_state_get()
728 if (state->registers) { in adreno_gpu_state_get()
731 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { in adreno_gpu_state_get()
732 u32 start = adreno_gpu->registers[i]; in adreno_gpu_state_get()
733 u32 end = adreno_gpu->registers[i + 1]; in adreno_gpu_state_get()
737 state->registers[pos++] = addr; in adreno_gpu_state_get()
738 state->registers[pos++] = gpu_read(gpu, addr); in adreno_gpu_state_get()
742 state->nr_registers = count; in adreno_gpu_state_get()
752 for (i = 0; i < ARRAY_SIZE(state->ring); i++) in adreno_gpu_state_destroy()
753 kvfree(state->ring[i].data); in adreno_gpu_state_destroy()
755 for (i = 0; state->bos && i < state->nr_bos; i++) in adreno_gpu_state_destroy()
756 kvfree(state->bos[i].data); in adreno_gpu_state_destroy()
758 kfree(state->bos); in adreno_gpu_state_destroy()
759 kfree(state->comm); in adreno_gpu_state_destroy()
760 kfree(state->cmd); in adreno_gpu_state_destroy()
761 kfree(state->registers); in adreno_gpu_state_destroy()
778 return kref_put(&state->ref, adreno_gpu_state_kref_destroy); in adreno_gpu_state_put()
807 buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s", in adreno_gpu_ascii85_encode()
830 * Only dump the non-zero part of the buffer - rarely will in adreno_show_object()
870 adreno_gpu->info->revn, in adreno_show()
871 ADRENO_CHIPID_ARGS(adreno_gpu->chip_id)); in adreno_show()
877 if (state->fault_info.ttbr0) { in adreno_show()
878 const struct msm_gpu_fault_info *info = &state->fault_info; in adreno_show()
880 drm_puts(p, "fault-info:\n"); in adreno_show()
881 drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0); in adreno_show()
882 drm_printf(p, " - iova=%.16lx\n", info->iova); in adreno_show()
883 drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ"); in adreno_show()
884 drm_printf(p, " - type=%s\n", info->type); in adreno_show()
885 drm_printf(p, " - source=%s\n", info->block); in adreno_show()
888 drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status); in adreno_show()
892 for (i = 0; i < gpu->nr_rings; i++) { in adreno_show()
893 drm_printf(p, " - id: %d\n", i); in adreno_show()
894 drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova); in adreno_show()
895 drm_printf(p, " last-fence: %u\n", state->ring[i].seqno); in adreno_show()
896 drm_printf(p, " retired-fence: %u\n", state->ring[i].fence); in adreno_show()
897 drm_printf(p, " rptr: %u\n", state->ring[i].rptr); in adreno_show()
898 drm_printf(p, " wptr: %u\n", state->ring[i].wptr); in adreno_show()
901 adreno_show_object(p, &state->ring[i].data, in adreno_show()
902 state->ring[i].data_size, &state->ring[i].encoded); in adreno_show()
905 if (state->bos) { in adreno_show()
908 for (i = 0; i < state->nr_bos; i++) { in adreno_show()
909 drm_printf(p, " - iova: 0x%016llx\n", in adreno_show()
910 state->bos[i].iova); in adreno_show()
911 drm_printf(p, " size: %zd\n", state->bos[i].size); in adreno_show()
912 drm_printf(p, " flags: 0x%x\n", state->bos[i].flags); in adreno_show()
913 drm_printf(p, " name: %-32s\n", state->bos[i].name); in adreno_show()
915 adreno_show_object(p, &state->bos[i].data, in adreno_show()
916 state->bos[i].size, &state->bos[i].encoded); in adreno_show()
920 if (state->nr_registers) { in adreno_show()
923 for (i = 0; i < state->nr_registers; i++) { in adreno_show()
924 drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n", in adreno_show()
925 state->registers[i * 2] << 2, in adreno_show()
926 state->registers[(i * 2) + 1]); in adreno_show()
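
Note the "<< 2" above: register offsets are stored as dword offsets (the same units gpu_read() uses to address the MMIO region) and are converted to byte offsets for the devcoredump output. An illustrative example:

	/*
	 * A register captured at dword offset 0x0800 is reported as
	 *   - { offset: 0x2000, value: ... }
	 * i.e. 0x0800 << 2.
	 */
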
944 adreno_gpu->info->revn, in adreno_dump_info()
945 ADRENO_CHIPID_ARGS(adreno_gpu->chip_id)); in adreno_dump_info()
947 for (i = 0; i < gpu->nr_rings; i++) { in adreno_dump_info()
948 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_dump_info()
951 ring->memptrs->fence, in adreno_dump_info()
952 ring->fctx->last_fence); in adreno_dump_info()
965 if (!adreno_gpu->registers) in adreno_dump()
969 printk("IO:region %s 00000000 00020000\n", gpu->name); in adreno_dump()
970 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { in adreno_dump()
971 uint32_t start = adreno_gpu->registers[i]; in adreno_dump()
972 uint32_t end = adreno_gpu->registers[i+1]; in adreno_dump()
984 struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); in ring_freewords()
986 /* Use ring->next to calculate free size */ in ring_freewords()
987 uint32_t wptr = ring->next - ring->start; in ring_freewords()
989 return (rptr + (size - 1) - wptr) % size; in ring_freewords()
995 DRM_DEV_ERROR(ring->gpu->dev->dev, in adreno_wait_ring()
997 ring->id); in adreno_wait_ring()
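
The free-space formula in ring_freewords() keeps one slot in reserve, so a completely empty ring (rptr == wptr) stays distinguishable from a full one. A standalone worked example of the same arithmetic (the 8192-dword ring size is an illustrative assumption for MSM_GPU_RINGBUFFER_SZ >> 2):

	#include <assert.h>
	#include <stdint.h>

	/* Mirror of the ring_freewords() arithmetic shown above. */
	static uint32_t freewords(uint32_t rptr, uint32_t wptr, uint32_t size)
	{
		return (rptr + (size - 1) - wptr) % size;
	}

	int main(void)
	{
		assert(freewords(10, 100, 8192) == 8101); /* 90 dwords pending, 1 slot reserved */
		assert(freewords(0, 0, 8192) == 8191);    /* empty ring never reports the full size */
		assert(freewords(0, 8191, 8192) == 0);    /* ring is full */
		return 0;
	}
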
1008 gpu->fast_rate = 0; in adreno_get_pwrlevels()
1012 if (ret == -ENODEV) { in adreno_get_pwrlevels()
1022 return -ENODEV; in adreno_get_pwrlevels()
1034 gpu->fast_rate = freq; in adreno_get_pwrlevels()
1037 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); in adreno_get_pwrlevels()
1050 if (PTR_ERR(ocmem) == -ENODEV) { in adreno_gpu_ocmem_init()
1062 ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->info->gmem); in adreno_gpu_ocmem_init()
1066 adreno_ocmem->ocmem = ocmem; in adreno_gpu_ocmem_init()
1067 adreno_ocmem->base = ocmem_hdl->addr; in adreno_gpu_ocmem_init()
1068 adreno_ocmem->hdl = ocmem_hdl; in adreno_gpu_ocmem_init()
1070 if (WARN_ON(ocmem_hdl->len != adreno_gpu->info->gmem)) in adreno_gpu_ocmem_init()
1071 return -ENOMEM; in adreno_gpu_ocmem_init()
1078 if (adreno_ocmem && adreno_ocmem->base) in adreno_gpu_ocmem_cleanup()
1079 ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS, in adreno_gpu_ocmem_cleanup()
1080 adreno_ocmem->hdl); in adreno_gpu_ocmem_cleanup()
1092 struct device *dev = &pdev->dev; in adreno_gpu_init()
1093 struct adreno_platform_config *config = dev->platform_data; in adreno_gpu_init()
1095 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_init()
1100 adreno_gpu->funcs = funcs; in adreno_gpu_init()
1101 adreno_gpu->info = config->info; in adreno_gpu_init()
1102 adreno_gpu->chip_id = config->chip_id; in adreno_gpu_init()
1104 gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1; in adreno_gpu_init()
1105 gpu->pdev = pdev; in adreno_gpu_init()
1109 adreno_gpu->info->family < ADRENO_6XX_GEN1) { in adreno_gpu_init()
1127 adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin); in adreno_gpu_init()
1130 ADRENO_CHIPID_ARGS(config->chip_id)); in adreno_gpu_init()
1132 return -ENOMEM; in adreno_gpu_init()
1143 adreno_gpu->info->inactive_period); in adreno_gpu_init()
1146 return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, in adreno_gpu_init()
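
The speedbin value masked into 16 bits above is read from an SoC fuse via nvmem (hence the nvmem-consumer include near the top of the file). A hedged sketch of such a read; the cell name "speed_bin" and the exact helper are assumptions, and a missing cell simply means bin 0:

	u32 speedbin = 0;

	/* The fuse cell is optional: not every SoC describes a speed bin */
	ret = nvmem_cell_read_variable_le_u32(dev, "speed_bin", &speedbin);
	if (ret && ret != -ENOENT)
		return ret;

	adreno_gpu->speedbin = (uint16_t)(speedbin & 0xffff);
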
1152 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_cleanup()
1153 struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL; in adreno_gpu_cleanup()
1156 for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) in adreno_gpu_cleanup()
1157 release_firmware(adreno_gpu->fw[i]); in adreno_gpu_cleanup()
1159 if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev)) in adreno_gpu_cleanup()
1160 pm_runtime_disable(&priv->gpu_pdev->dev); in adreno_gpu_cleanup()
1162 msm_gpu_cleanup(&adreno_gpu->base); in adreno_gpu_cleanup()