// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#define CREATE_TRACE_POINTS
#include <trace/events/amdxdna.h>

#include "amdxdna_mailbox.h"

#define MB_ERR(chann, fmt, args...) \
({ \
	typeof(chann) _chann = chann; \
	dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
		(_chann)->msix_irq, ##args); \
})
#define MB_DBG(chann, fmt, args...) \
({ \
	typeof(chann) _chann = chann; \
	dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
		(_chann)->msix_irq, ##args); \
})
#define MB_WARN_ONCE(chann, fmt, args...) \
({ \
	typeof(chann) _chann = chann; \
	dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
		      (_chann)->msix_irq, ##args); \
})

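/*
 * Message IDs handed to the device are tagged with MAGIC_VAL in their upper
 * byte so that IDs carried in responses can be sanity-checked before lookup.
 */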
#define MAGIC_VAL 0x1D000000U
#define MAGIC_VAL_MASK 0xFF000000
#define MAX_MSG_ID_ENTRIES 256
#define MSG_RX_TIMER 200 /* milliseconds */
#define MAILBOX_NAME "xdna_mailbox"

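/* X2I is the outbound (send) ring, I2X the inbound (receive) ring. */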
enum channel_res_type {
	CHAN_RES_X2I,
	CHAN_RES_I2X,
	CHAN_RES_NUM
};

struct mailbox {
	struct device *dev;
	struct xdna_mailbox_res res;
};

struct mailbox_channel {
	struct mailbox *mb;
	struct xdna_mailbox_chann_res res[CHAN_RES_NUM];
	int msix_irq;
	u32 iohub_int_addr;
	struct xarray chan_xa;
	u32 next_msgid;
	u32 x2i_tail;

	/* Received msg related fields */
	struct workqueue_struct *work_q;
	struct work_struct rx_work;
	u32 i2x_head;
	bool bad_state;
};

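/* sz_ver packs the payload size (bits 10:0) and protocol version (bits 23:16). */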
#define MSG_BODY_SZ GENMASK(10, 0)
#define MSG_PROTO_VER GENMASK(23, 16)
struct xdna_msg_header {
	__u32 total_size;
	__u32 sz_ver;
	__u32 id;
	__u32 opcode;
} __packed;

static_assert(sizeof(struct xdna_msg_header) == 16);

struct mailbox_pkg {
	struct xdna_msg_header header;
	__u32 payload[];
};

/* The protocol version. */
#define MSG_PROTOCOL_VERSION 0x1
/*
 * The tombstone value. It is written at the ring buffer tail when a message
 * cannot fit before the end of the ring, telling the reader to wrap to
 * offset 0.
 */
#define TOMBSTONE 0xDEADFACE

struct mailbox_msg {
	void *handle;
	int (*notify_cb)(void *handle, const u32 *data, size_t size);
	size_t pkg_size; /* package size in bytes */
	struct mailbox_pkg pkg;
};

static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
{
	struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
	void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

	writel(data, ringbuf_addr);
}

static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
{
	struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
	void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

	return readl(ringbuf_addr);
}

static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
{
	struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
	void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
	int ret, value;

	/* Poll until the value becomes non-zero */
	ret = readx_poll_timeout(readl, ringbuf_addr, value,
				 value, 1 /* us */, 100);
	if (ret < 0)
		return ret;

	*val = value;
	return 0;
}

static inline void
mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
{
	mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
	mb_chann->i2x_head = headptr_val;
}

static inline void
mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
{
	mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
	mb_chann->x2i_tail = tailptr_val;
}

static inline u32
mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
	return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
}

static inline u32
mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
	return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
}

static inline u32
mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
	return mb_chann->res[type].rb_size;
}

static inline int mailbox_validate_msgid(int msg_id)
{
	return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
}

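/*
 * Allocate a cyclic message ID for a pending message and tag it with
 * MAGIC_VAL; the untagged ID is also the xarray index used for response lookup.
 */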
static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
	u32 msg_id;
	int ret;

	ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
				  XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
				  &mb_chann->next_msgid, GFP_NOWAIT);
	if (ret < 0)
		return ret;

	/*
	 * Add MAGIC_VAL to the higher bits.
	 */
	msg_id |= MAGIC_VAL;
	return msg_id;
}

static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
{
	msg_id &= ~MAGIC_VAL_MASK;
	xa_erase_irq(&mb_chann->chan_xa, msg_id);
}

static void mailbox_release_msg(struct mailbox_channel *mb_chann,
				struct mailbox_msg *mb_msg)
{
	MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
	       mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
	mb_msg->notify_cb(mb_msg->handle, NULL, 0);
	kfree(mb_msg);
}

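/*
 * Copy a packaged message into the X2I ring buffer and advance the cached
 * tail pointer. If the message would not fit before the end of the ring, a
 * TOMBSTONE marker is written at the current tail and the write wraps to
 * offset 0. Returns -ENOSPC when the ring has no room for the message.
 */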
static int
mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
	void __iomem *write_addr;
	u32 ringbuf_size;
	u32 head, tail;
	u32 start_addr;
	u32 tmp_tail;

	head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
	tail = mb_chann->x2i_tail;
	ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
	start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
	tmp_tail = tail + mb_msg->pkg_size;

	if (tail < head && tmp_tail >= head)
		goto no_space;

	if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
			     mb_msg->pkg_size >= head))
		goto no_space;

	if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
		write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
		writel(TOMBSTONE, write_addr);

		/* The tombstone is written; wrap and write from the start of the ring buffer */
		tail = 0;
	}

	write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
	memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
	mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);

	trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
			    mb_msg->pkg.header.opcode,
			    mb_msg->pkg.header.id);

	return 0;

no_space:
	return -ENOSPC;
}

static int
mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
		 void *data)
{
	struct mailbox_msg *mb_msg;
	int msg_id;
	int ret;

	msg_id = header->id;
	if (!mailbox_validate_msgid(msg_id)) {
		MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
		return -EINVAL;
	}

	msg_id &= ~MAGIC_VAL_MASK;
	mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
	if (!mb_msg) {
		MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
		return -EINVAL;
	}

	MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
	       header->opcode, header->total_size, header->id);
	ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
	if (unlikely(ret))
		MB_ERR(mb_chann, "Message callback ret %d", ret);

	kfree(mb_msg);
	return ret;
}

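/*
 * Consume one message from the I2X ring buffer. The first word at the head is
 * either the message's total_size or TOMBSTONE, in which case the producer
 * wrapped and the next message starts at offset 0. Returns -ENOENT when the
 * ring is empty.
 */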
static int mailbox_get_msg(struct mailbox_channel *mb_chann)
{
	struct xdna_msg_header header;
	void __iomem *read_addr;
	u32 msg_size, rest;
	u32 ringbuf_size;
	u32 head, tail;
	u32 start_addr;
	int ret;

	if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
		return -EINVAL;
	head = mb_chann->i2x_head;
	ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
	start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;

	if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
		MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
		return -EINVAL;
	}

	/* ringbuf empty */
	if (head == tail)
		return -ENOENT;

	if (head == ringbuf_size)
		head = 0;

	/* Peek the message size, which may be the TOMBSTONE marker */
	read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
	header.total_size = readl(read_addr);
	/* If the size is TOMBSTONE, the next read starts from offset 0 */
	if (header.total_size == TOMBSTONE) {
		if (head < tail) {
			MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
				     head, tail);
			return -EINVAL;
		}
		mailbox_set_headptr(mb_chann, 0);
		return 0;
	}

	if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
		MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
		return -EINVAL;
	}
	msg_size = sizeof(header) + header.total_size;

	if (msg_size > ringbuf_size - head || msg_size > tail - head) {
		MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
			     msg_size, tail, head);
		return -EINVAL;
	}

	rest = sizeof(header) - sizeof(u32);
	read_addr += sizeof(u32);
	memcpy_fromio((u32 *)&header + 1, read_addr, rest);
	read_addr += rest;

	ret = mailbox_get_resp(mb_chann, &header, (u32 *)read_addr);

	mailbox_set_headptr(mb_chann, head + msg_size);
	/* After the update, head may equal ringbuf_size; this is expected. */
	trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
			    header.opcode, header.id);

	return ret;
}

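/* Hard IRQ handler: kick rx_work, then acknowledge the interrupt by clearing the IOHUB register. */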
static irqreturn_t mailbox_irq_handler(int irq, void *p)
{
	struct mailbox_channel *mb_chann = p;

	trace_mbox_irq_handle(MAILBOX_NAME, irq);
	/* Schedule rx_work to call the callback functions */
	queue_work(mb_chann->work_q, &mb_chann->rx_work);
	/* Clear IOHUB register */
	mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);

	return IRQ_HANDLED;
}

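/*
 * Workqueue bottom half: drain the I2X ring until it is empty or an error
 * marks the channel as bad and disables the interrupt.
 */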
static void mailbox_rx_worker(struct work_struct *rx_work)
{
	struct mailbox_channel *mb_chann;
	int ret;

	mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);

	if (READ_ONCE(mb_chann->bad_state)) {
		MB_ERR(mb_chann, "Channel in bad state, work aborted");
		return;
	}

	while (1) {
		/*
		 * If the return value is 0, keep consuming messages until
		 * there are none left or an error occurs.
		 */
		ret = mailbox_get_msg(mb_chann);
		if (ret == -ENOENT)
			break;

		/* Any other error means the device is not healthy; disable the IRQ. */
		if (unlikely(ret)) {
			MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
			WRITE_ONCE(mb_chann->bad_state, true);
			disable_irq(mb_chann->msix_irq);
			break;
		}
	}
}

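/*
 * Package the caller's payload behind an xdna_msg_header, allocate a message
 * ID and push the result into the X2I ring. The caller's notify_cb is invoked
 * later from rx_work when a response with a matching ID arrives, or with a
 * NULL payload if the channel is destroyed first.
 */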
int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
			  const struct xdna_mailbox_msg *msg, u64 tx_timeout)
{
	struct xdna_msg_header *header;
	struct mailbox_msg *mb_msg;
	size_t pkg_size;
	int ret;

	pkg_size = sizeof(*header) + msg->send_size;
	if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
		MB_ERR(mb_chann, "Message size larger than ringbuf size");
		return -EINVAL;
	}

	if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
		MB_ERR(mb_chann, "Message size must be 4-byte aligned");
		return -EINVAL;
	}

	/* The first word of the payload can NOT be TOMBSTONE */
	if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
		MB_ERR(mb_chann, "Tombstone in data");
		return -EINVAL;
	}

	if (READ_ONCE(mb_chann->bad_state)) {
		MB_ERR(mb_chann, "Channel in bad state");
		return -EPIPE;
	}

	mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
	if (!mb_msg)
		return -ENOMEM;

	mb_msg->handle = msg->handle;
	mb_msg->notify_cb = msg->notify_cb;
	mb_msg->pkg_size = pkg_size;

	header = &mb_msg->pkg.header;
	/*
	 * Hardware uses total_size and size to split huge messages.
	 * That is not supported here, so the two values are the same.
	 */
	header->total_size = msg->send_size;
	header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
			 FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
	header->opcode = msg->opcode;
	memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);

	ret = mailbox_acquire_msgid(mb_chann, mb_msg);
	if (unlikely(ret < 0)) {
		MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
		goto msg_id_failed;
	}
	header->id = ret;

	MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
	       header->opcode, header->total_size, header->id);

	ret = mailbox_send_msg(mb_chann, mb_msg);
	if (ret) {
		MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
		goto release_id;
	}

	return 0;

release_id:
	mailbox_release_msgid(mb_chann, header->id);
msg_id_failed:
	kfree(mb_msg);
	return ret;
}

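/*
 * Create a channel: record the X2I/I2X ring resources, initialize the pending
 * message xarray, create the single-threaded RX workqueue and install the
 * MSI-X interrupt handler.
 */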
struct mailbox_channel *
xdna_mailbox_create_channel(struct mailbox *mb,
			    const struct xdna_mailbox_chann_res *x2i,
			    const struct xdna_mailbox_chann_res *i2x,
			    u32 iohub_int_addr,
			    int mb_irq)
{
	struct mailbox_channel *mb_chann;
	int ret;

	if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
		pr_err("Ring buffer size must be a power of 2");
		return NULL;
	}

	mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
	if (!mb_chann)
		return NULL;

	mb_chann->mb = mb;
	mb_chann->msix_irq = mb_irq;
	mb_chann->iohub_int_addr = iohub_int_addr;
	memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
	memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));

	xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
	mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);

	INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
	mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
	if (!mb_chann->work_q) {
		MB_ERR(mb_chann, "Create workqueue failed");
		goto free_and_out;
	}

	/* Everything looks good. Time to enable the IRQ handler */
	ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
	if (ret) {
		MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
		goto destroy_wq;
	}

	mb_chann->bad_state = false;

	MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
	return mb_chann;

destroy_wq:
	destroy_workqueue(mb_chann->work_q);
free_and_out:
	kfree(mb_chann);
	return NULL;
}

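/*
 * Destroy a channel. Messages still pending in the xarray are completed with
 * a NULL payload via their notify_cb before the channel memory is freed.
 */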
int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
{
	struct mailbox_msg *mb_msg;
	unsigned long msg_id;

	MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
	free_irq(mb_chann->msix_irq, mb_chann);
	destroy_workqueue(mb_chann->work_q);
	/* We can clean up and release resources */

	xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
		mailbox_release_msg(mb_chann, mb_msg);

	xa_destroy(&mb_chann->chan_xa);

	MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
	kfree(mb_chann);
	return 0;
}

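/* Quiesce a channel without freeing it: mask the IRQ and flush pending rx_work. */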
void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
{
	/* Disable the IRQ and wait for any running handler. This might sleep. */
	disable_irq(mb_chann->msix_irq);

	/* Cancel RX work and wait for it to finish */
	cancel_work_sync(&mb_chann->rx_work);
	MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
}

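/*
 * The mailbox instance is allocated as DRM-managed memory, so it is released
 * automatically when the DRM device is torn down.
 */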
struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
				     const struct xdna_mailbox_res *res)
{
	struct mailbox *mb;

	mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
	if (!mb)
		return NULL;
	mb->dev = ddev->dev;

	/* Mailbox and ring buffer base and size information */
	memcpy(&mb->res, res, sizeof(*res));

	return mb;
}