// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <linux/hmm.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <trace/events/amdxdna.h>

#include "aie2_msg_priv.h"
#include "aie2_pci.h"
#include "aie2_solver.h"
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"

static bool force_cmdlist;
module_param(force_cmdlist, bool, 0600);
MODULE_PARM_DESC(force_cmdlist, "Force using command list (default false)");

#define HWCTX_MAX_TIMEOUT	60000 /* milliseconds */

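/*
 * Job reference counting: references are taken in aie2_cmd_submit() and
 * aie2_sched_job_run() and dropped in aie2_sched_notify(),
 * aie2_sched_job_free() and at the end of aie2_cmd_submit(). The final
 * put lands in aie2_job_release() below, which frees the job.
 */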
static void aie2_job_release(struct kref *ref)
{
	struct amdxdna_sched_job *job;

	job = container_of(ref, struct amdxdna_sched_job, refcnt);
	amdxdna_sched_job_cleanup(job);
	if (job->out_fence)
		dma_fence_put(job->out_fence);
	kfree(job);
}

static void aie2_job_put(struct amdxdna_sched_job *job)
{
	kref_put(&job->refcnt, aie2_job_release);
}

/* Pass bad_job only from aie2_sched_job_timedout(); all other callers pass NULL */
static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
			    struct drm_sched_job *bad_job)
{
	drm_sched_stop(&hwctx->priv->sched, bad_job);
	aie2_destroy_context(xdna->dev_handle, hwctx);
}

static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_gem_obj *heap = hwctx->priv->heap;
	int ret;

	ret = aie2_create_context(xdna->dev_handle, hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
		goto out;
	}

	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
				heap->mem.userptr, heap->mem.size);
	if (ret) {
		XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
		goto out;
	}

	if (hwctx->status != HWCTX_STAT_READY) {
		XDNA_DBG(xdna, "hwctx is not ready, status %d", hwctx->status);
		goto out;
	}

	ret = aie2_config_cu(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
		goto out;
	}

out:
	drm_sched_start(&hwctx->priv->sched, 0);
	XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
	return ret;
}

void aie2_restart_ctx(struct amdxdna_client *client)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
	mutex_lock(&client->hwctx_lock);
	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
		if (hwctx->status != HWCTX_STAT_STOP)
			continue;

		hwctx->status = hwctx->old_status;
		XDNA_DBG(xdna, "Resetting %s", hwctx->name);
		aie2_hwctx_restart(xdna, hwctx);
	}
	mutex_unlock(&client->hwctx_lock);
}

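/*
 * Every submitted job adds a dma_fence_chain point on the hwctx's timeline
 * syncobj at its sequence number (see aie2_cmd_submit()), so a job's
 * completion fence can be looked up later by sequence number alone.
 */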
static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
{
	struct dma_fence *fence, *out_fence = NULL;
	int ret;

	fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
	if (!fence)
		return NULL;

	ret = dma_fence_chain_find_seqno(&fence, seq);
	if (ret)
		goto out;

	out_fence = dma_fence_get(dma_fence_chain_contained(fence));

out:
	dma_fence_put(fence);
	return out_fence;
}

static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
{
	struct dma_fence *fence;

	fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
	if (!fence)
		return;

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
}

void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	/*
	 * A command timeout here is unlikely, and even if it happens it does
	 * not break the system: aie2_hwctx_stop() destroys the mailbox
	 * channel and aborts all outstanding commands.
	 */
	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
	aie2_hwctx_wait_for_idle(hwctx);
	aie2_hwctx_stop(xdna, hwctx, NULL);
	hwctx->old_status = hwctx->status;
	hwctx->status = HWCTX_STAT_STOP;
}

void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	/*
	 * The resume path cannot guarantee that the mailbox channel is
	 * regenerated. If regeneration fails, submitting a message to the
	 * channel returns an error.
	 */
	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
	hwctx->status = hwctx->old_status;
	aie2_hwctx_restart(xdna, hwctx);
}

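/*
 * Common completion tail for all response handlers: signal the hardware
 * fence, release the per-hwctx job slot, and drop the mm and job
 * references taken in aie2_sched_job_run().
 */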
static void
aie2_sched_notify(struct amdxdna_sched_job *job)
{
	struct dma_fence *fence = job->fence;

	trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
	job->hwctx->priv->completed++;
	dma_fence_signal(fence);

	up(&job->hwctx->priv->job_sem);
	job->job_done = true;
	dma_fence_put(fence);
	mmput_async(job->mm);
	aie2_job_put(job);
}

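/*
 * Response handlers run from the mailbox receive path when the firmware
 * answers a command. @data points at the raw response payload; it is NULL
 * when the command was aborted (e.g. the channel was destroyed). Every
 * path must end in aie2_sched_notify() so the job completes.
 */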
static int
aie2_sched_resp_handler(void *handle, const u32 *data, size_t size)
{
	struct amdxdna_sched_job *job = handle;
	struct amdxdna_gem_obj *cmd_abo;
	int ret = 0;
	u32 status;

	cmd_abo = job->cmd_bo;

	if (unlikely(!data))
		goto out;

	if (unlikely(size != sizeof(u32))) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}

	status = *data;
	XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
	if (status == AIE2_STATUS_SUCCESS)
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
	else
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);

out:
	aie2_sched_notify(job);
	return ret;
}

static int
aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size)
{
	struct amdxdna_sched_job *job = handle;
	int ret = 0;
	u32 status;

	if (unlikely(!data))
		goto out;

	if (unlikely(size != sizeof(u32))) {
		ret = -EINVAL;
		goto out;
	}

	status = *data;
	XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);

out:
	aie2_sched_notify(job);
	return ret;
}

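/*
 * A command-list response is three u32 words (struct cmd_chain_resp): an
 * overall status plus the failing command's index and status, hence the
 * sizeof(u32) * 3 check below.
 */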
static int
aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size)
{
	struct amdxdna_sched_job *job = handle;
	struct amdxdna_gem_obj *cmd_abo;
	struct cmd_chain_resp *resp;
	struct amdxdna_dev *xdna;
	u32 fail_cmd_status;
	u32 fail_cmd_idx;
	int ret = 0;

	cmd_abo = job->cmd_bo;
	if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}

	resp = (struct cmd_chain_resp *)data;
	xdna = job->hwctx->client->xdna;
	XDNA_DBG(xdna, "Status 0x%x", resp->status);
	if (resp->status == AIE2_STATUS_SUCCESS) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
		goto out;
	}

	/* Slow path to handle an error: read details from the ring buffer on the BAR */
	fail_cmd_idx = resp->fail_cmd_idx;
	fail_cmd_status = resp->fail_cmd_status;
	XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
		 fail_cmd_idx, fail_cmd_status);

	if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
		amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
		ret = -EINVAL;
		goto out;
	}
	amdxdna_cmd_set_state(cmd_abo, fail_cmd_status);

	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
		struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);

		cc->error_index = fail_cmd_idx;
		if (cc->error_index >= cc->command_count)
			cc->error_index = 0;
	}
out:
	aie2_sched_notify(job);
	return ret;
}

static struct dma_fence *
aie2_sched_job_run(struct drm_sched_job *sched_job)
{
	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
	struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
	struct amdxdna_hwctx *hwctx = job->hwctx;
	struct dma_fence *fence;
	int ret;

	if (!mmget_not_zero(job->mm))
		return ERR_PTR(-ESRCH);

	kref_get(&job->refcnt);
	fence = dma_fence_get(job->fence);

	if (unlikely(!cmd_abo)) {
		ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler);
		goto out;
	}

	amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);

	if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
		ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
	else if (force_cmdlist)
		ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
	else
		ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);

out:
	if (ret) {
		dma_fence_put(job->fence);
		aie2_job_put(job);
		mmput(job->mm);
		fence = ERR_PTR(ret);
	}
	trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);

	return fence;
}

static void aie2_sched_job_free(struct drm_sched_job *sched_job)
{
	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
	struct amdxdna_hwctx *hwctx = job->hwctx;

	trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
	if (!job->job_done)
		up(&hwctx->priv->job_sem);

	drm_sched_job_cleanup(sched_job);
	aie2_job_put(job);
}

static enum drm_gpu_sched_stat
aie2_sched_job_timedout(struct drm_sched_job *sched_job)
{
	struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
	struct amdxdna_hwctx *hwctx = job->hwctx;
	struct amdxdna_dev *xdna;

	xdna = hwctx->client->xdna;
	trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
	mutex_lock(&xdna->dev_lock);
	aie2_hwctx_stop(xdna, hwctx, sched_job);

	aie2_hwctx_restart(xdna, hwctx);
	mutex_unlock(&xdna->dev_lock);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

const struct drm_sched_backend_ops sched_ops = {
	.run_job = aie2_sched_job_run,
	.free_job = aie2_sched_job_free,
	.timedout_job = aie2_sched_job_timedout,
};

static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int start, end, first, last;
	u32 width = 1, entries = 0;
	int i;

	if (!hwctx->num_tiles) {
		XDNA_ERR(xdna, "Number of tiles is zero");
		return -EINVAL;
	}

	ndev = xdna->dev_handle;
	if (unlikely(!ndev->metadata.core.row_count)) {
		XDNA_WARN(xdna, "Core tile row count is zero");
		return -EINVAL;
	}

	hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
	if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
		XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
		return -EINVAL;
	}

	if (ndev->priv->col_align == COL_ALIGN_NATURE)
		width = hwctx->num_col;

	/*
	 * In range [start, end], find the columns that are multiples of width.
	 * 'first' is the first such column,
	 * 'last' is the last such column,
	 * 'entries' is the total number of such columns.
	 */
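	/*
	 * Worked example (hypothetical geometry): with total_col = 8,
	 * num_col = 2 and natural alignment (width = 2), start = 1:
	 * end = 8 - 2 = 6, first = 2, last = 6, so entries = 3 and
	 * col_list = { 2, 4, 6 }.
	 */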
	start = xdna->dev_info->first_col;
	end = ndev->total_col - hwctx->num_col;
	if (start > 0 && end == 0) {
		XDNA_DBG(xdna, "Force start from col 0");
		start = 0;
	}
	first = start + (width - start % width) % width;
	last = end - end % width;
	if (last >= first)
		entries = (last - first) / width + 1;
	XDNA_DBG(xdna, "start %d end %d first %d last %d",
		 start, end, first, last);

	if (unlikely(!entries)) {
		XDNA_ERR(xdna, "Start %d end %d width %d",
			 start, end, width);
		return -EINVAL;
	}

	hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
	if (!hwctx->col_list)
		return -ENOMEM;

	hwctx->col_list_len = entries;
	hwctx->col_list[0] = first;
	for (i = 1; i < entries; i++)
		hwctx->col_list[i] = hwctx->col_list[i - 1] + width;

	print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
			     entries * sizeof(*hwctx->col_list), false);
	return 0;
}

static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct alloc_requests *xrs_req;
	int ret;

	xrs_req = kzalloc(sizeof(*xrs_req), GFP_KERNEL);
	if (!xrs_req)
		return -ENOMEM;

	xrs_req->cdo.start_cols = hwctx->col_list;
	xrs_req->cdo.cols_len = hwctx->col_list_len;
	xrs_req->cdo.ncols = hwctx->num_col;
	xrs_req->cdo.qos_cap.opc = hwctx->max_opc;

	xrs_req->rqos.gops = hwctx->qos.gops;
	xrs_req->rqos.fps = hwctx->qos.fps;
	xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
	xrs_req->rqos.latency = hwctx->qos.latency;
	xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
	xrs_req->rqos.priority = hwctx->qos.priority;

	xrs_req->rid = (uintptr_t)hwctx;

	ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
	if (ret)
		XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);

	kfree(xrs_req);
	return ret;
}

static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	int ret;

	ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
	if (ret)
		XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
}

static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct drm_file *filp = hwctx->client->filp;
	struct drm_syncobj *syncobj;
	u32 hdl;
	int ret;

	hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;

	ret = drm_syncobj_create(&syncobj, 0, NULL);
	if (ret) {
		XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
		return ret;
	}
	ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
	if (ret) {
		drm_syncobj_put(syncobj);
		XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
		return ret;
	}
	hwctx->priv->syncobj = syncobj;
	hwctx->syncobj_hdl = hdl;

	return 0;
}

static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
{
	/*
	 * The syncobj_hdl is owned by user space and will be cleaned up
	 * separately.
	 */
	drm_syncobj_put(hwctx->priv->syncobj);
}

int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_client *client = hwctx->client;
	struct amdxdna_dev *xdna = client->xdna;
	struct drm_gpu_scheduler *sched;
	struct amdxdna_hwctx_priv *priv;
	struct amdxdna_gem_obj *heap;
	struct amdxdna_dev_hdl *ndev;
	int i, ret;

	priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	hwctx->priv = priv;

	mutex_lock(&client->mm_lock);
	heap = client->dev_heap;
	if (!heap) {
		XDNA_ERR(xdna, "The client dev heap object does not exist");
		mutex_unlock(&client->mm_lock);
		ret = -ENOENT;
		goto free_priv;
	}
	drm_gem_object_get(to_gobj(heap));
	mutex_unlock(&client->mm_lock);
	priv->heap = heap;
	sema_init(&priv->job_sem, HWCTX_MAX_CMDS);

	ret = amdxdna_gem_pin(heap);
	if (ret) {
		XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
		goto put_heap;
	}

	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
		struct amdxdna_gem_obj *abo;
		struct amdxdna_drm_create_bo args = {
			.flags = 0,
			.type = AMDXDNA_BO_DEV,
			.vaddr = 0,
			.size = MAX_CHAIN_CMDBUF_SIZE,
		};

		abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp, true);
		if (IS_ERR(abo)) {
			ret = PTR_ERR(abo);
			goto free_cmd_bufs;
		}

		XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
			 i, abo->mem.dev_addr, abo->mem.size);
		priv->cmd_buf[i] = abo;
	}

	sched = &priv->sched;
	mutex_init(&priv->io_lock);

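	/*
	 * Lockdep annotation (no functional effect): record that io_lock can
	 * nest inside fs_reclaim, so a later allocation that may enter direct
	 * reclaim while io_lock is held is reported as a potential deadlock.
	 */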
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->io_lock);
	fs_reclaim_release(GFP_KERNEL);

	ret = drm_sched_init(sched, &sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT,
			     HWCTX_MAX_CMDS, 0, msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
			     NULL, NULL, hwctx->name, xdna->ddev.dev);
	if (ret) {
		XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
		goto free_cmd_bufs;
	}

	ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
				    &sched, 1, NULL);
	if (ret) {
		XDNA_ERR(xdna, "Failed to init sched entity. ret %d", ret);
		goto free_sched;
	}

	ret = aie2_hwctx_col_list(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
		goto free_entity;
	}

	ret = aie2_alloc_resource(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
		goto free_col_list;
	}

	ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
				heap->mem.userptr, heap->mem.size);
	if (ret) {
		XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
		goto release_resource;
	}

	ret = aie2_ctx_syncobj_create(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
		goto release_resource;
	}

	hwctx->status = HWCTX_STAT_INIT;
	ndev = xdna->dev_handle;
	ndev->hwctx_num++;

	XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);

	return 0;

release_resource:
	aie2_release_resource(hwctx);
free_col_list:
	kfree(hwctx->col_list);
free_entity:
	drm_sched_entity_destroy(&priv->entity);
free_sched:
	drm_sched_fini(&priv->sched);
free_cmd_bufs:
	for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
		if (!priv->cmd_buf[i])
			continue;
		drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
	}
	amdxdna_gem_unpin(heap);
put_heap:
	drm_gem_object_put(to_gobj(heap));
free_priv:
	kfree(priv);
	return ret;
}

void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_dev_hdl *ndev;
	struct amdxdna_dev *xdna;
	int idx;

	xdna = hwctx->client->xdna;
	ndev = xdna->dev_handle;
	ndev->hwctx_num--;
	drm_sched_wqueue_stop(&hwctx->priv->sched);

	/* From now on, the scheduler will not send commands to the device. */
	aie2_release_resource(hwctx);

	/*
	 * All submitted commands have been aborted. Restart the scheduler
	 * queue to clean up jobs. If aie2_sched_job_run() is called now, it
	 * returns -ENODEV.
	 */
	drm_sched_wqueue_start(&hwctx->priv->sched);

	aie2_hwctx_wait_for_idle(hwctx);
	drm_sched_entity_destroy(&hwctx->priv->entity);
	drm_sched_fini(&hwctx->priv->sched);
	aie2_ctx_syncobj_destroy(hwctx);

	XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);

	for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
		drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
	amdxdna_gem_unpin(hwctx->priv->heap);
	drm_gem_object_put(to_gobj(hwctx->priv->heap));

	mutex_destroy(&hwctx->priv->io_lock);
	kfree(hwctx->col_list);
	kfree(hwctx->priv);
	kfree(hwctx->cus);
}

static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
{
	struct amdxdna_hwctx_param_config_cu *config = buf;
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	u32 total_size;
	int ret;

	XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
	if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
		return -EINVAL;

	if (hwctx->status != HWCTX_STAT_INIT) {
		XDNA_ERR(xdna, "Re-configuring CU is not supported");
		return -EINVAL;
	}

	if (!config->num_cus) {
		XDNA_ERR(xdna, "Number of CU is zero");
		return -EINVAL;
	}

	total_size = struct_size(config, cu_configs, config->num_cus);
	if (total_size > size) {
		XDNA_ERR(xdna, "CU config larger than buffer size");
		return -EINVAL;
	}

	hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
	if (!hwctx->cus)
		return -ENOMEM;

	ret = aie2_config_cu(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
		goto free_cus;
	}

	wmb(); /* Make hwctx->cus visible before the lockless status check in submit */
	hwctx->status = HWCTX_STAT_READY;

	return 0;

free_cus:
	kfree(hwctx->cus);
	hwctx->cus = NULL;
	return ret;
}

int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
	switch (type) {
	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
		return aie2_hwctx_cu_config(hwctx, buf, size);
	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
		return -EOPNOTSUPP;
	default:
		XDNA_DBG(xdna, "Unsupported type %d", type);
		return -EOPNOTSUPP;
	}
}

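/*
 * Fault in the BO's user pages with HMM. The standard protocol applies:
 * sample the notifier sequence, call hmm_range_fault(), then retake
 * notifier_lock and retry if the interval was invalidated in between.
 */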
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	struct mm_struct *mm = abo->mem.notifier.mm;
	struct hmm_range range = { 0 };
	unsigned long timeout;
	int ret;

	XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
		       abo->mem.userptr, abo->mem.size);
	range.notifier = &abo->mem.notifier;
	range.start = abo->mem.userptr;
	range.end = abo->mem.userptr + abo->mem.size;
	range.hmm_pfns = abo->mem.pfns;
	range.default_flags = HMM_PFN_REQ_FAULT;

	if (!mmget_not_zero(mm))
		return -EFAULT;

	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
again:
	range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(mm);
	if (ret) {
		if (time_after(jiffies, timeout)) {
			ret = -ETIME;
			goto put_mm;
		}

		if (ret == -EBUSY)
			goto again;

		goto put_mm;
	}

	down_read(&xdna->notifier_lock);
	if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
		up_read(&xdna->notifier_lock);
		goto again;
	}
	abo->mem.map_invalid = false;
	up_read(&xdna->notifier_lock);

put_mm:
	mmput(mm);
	return ret;
}

int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence_chain *chain;
	struct amdxdna_gem_obj *abo;
	unsigned long timeout = 0;
	int ret, i;

	ret = down_interruptible(&hwctx->priv->job_sem);
	if (ret) {
		XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
		return ret;
	}

	chain = dma_fence_chain_alloc();
	if (!chain) {
		XDNA_ERR(xdna, "Alloc fence chain failed");
		ret = -ENOMEM;
		goto up_sem;
	}

	ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx);
	if (ret) {
		XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
		goto free_chain;
	}

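	/*
	 * Lock all BO reservations, then check whether any userptr mapping
	 * was invalidated by the MMU notifier. If so, drop every lock,
	 * re-fault the range and retry, giving up after
	 * HMM_RANGE_DEFAULT_TIMEOUT.
	 */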
retry:
	ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
	if (ret) {
		XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
		goto cleanup_job;
	}

	for (i = 0; i < job->bo_cnt; i++) {
		ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
		if (ret) {
			XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
			goto cleanup_job;
		}
	}

	down_read(&xdna->notifier_lock);
	for (i = 0; i < job->bo_cnt; i++) {
		abo = to_xdna_obj(job->bos[i]);
		if (abo->mem.map_invalid) {
			up_read(&xdna->notifier_lock);
			drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
			if (!timeout) {
				timeout = jiffies +
					msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
			} else if (time_after(jiffies, timeout)) {
				ret = -ETIME;
				goto cleanup_job;
			}

			ret = aie2_populate_range(abo);
			if (ret)
				goto cleanup_job;
			goto retry;
		}
	}

	mutex_lock(&hwctx->priv->io_lock);
	drm_sched_job_arm(&job->base);
	job->out_fence = dma_fence_get(&job->base.s_fence->finished);
	for (i = 0; i < job->bo_cnt; i++)
		dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
	job->seq = hwctx->priv->seq++;
	kref_get(&job->refcnt);
	drm_sched_entity_push_job(&job->base);

	*seq = job->seq;
	drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
	mutex_unlock(&hwctx->priv->io_lock);

	up_read(&xdna->notifier_lock);
	drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);

	aie2_job_put(job);

	return 0;

cleanup_job:
	drm_sched_job_cleanup(&job->base);
free_chain:
	dma_fence_chain_free(chain);
up_sem:
	up(&hwctx->priv->job_sem);
	job->job_done = true;
	return ret;
}

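/*
 * MMU interval notifier path: mark the mapping invalid under
 * notifier_lock, then wait for all fences on the BO so no in-flight
 * command still uses the pages that are about to go away.
 */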
void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
			 unsigned long cur_seq)
{
	struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
	struct drm_gem_object *gobj = to_gobj(abo);
	long ret;

	down_write(&xdna->notifier_lock);
	abo->mem.map_invalid = true;
	mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
	up_write(&xdna->notifier_lock);
	ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
				    true, MAX_SCHEDULE_TIMEOUT);
	if (!ret || ret == -ERESTARTSYS)
		XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
}