1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Microchip Inter-Processor communication (IPC) driver
4 *
5 * Copyright (c) 2021 - 2024 Microchip Technology Inc. All rights reserved.
6 *
7 * Author: Valentina Fernandez <[email protected]>
8 *
9 */
10
11 #include <linux/io.h>
12 #include <linux/err.h>
13 #include <linux/smp.h>
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/of_device.h>
18 #include <linux/interrupt.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/platform_device.h>
21 #include <linux/mailbox/mchp-ipc.h>
22 #include <asm/sbi.h>
23 #include <asm/vendorid_list.h>
24
25 #define IRQ_STATUS_BITS 12
26 #define NUM_CHANS_PER_CLUSTER 5
27 #define IPC_DMA_BIT_MASK 32
28 #define SBI_EXT_MICROCHIP_TECHNOLOGY (SBI_EXT_VENDOR_START | \
29 MICROCHIP_VENDOR_ID)
30
/* Function IDs of the Microchip vendor SBI extension used by this driver. */
enum {
	SBI_EXT_IPC_PROBE = 0x100,	/* query hw type and channel count */
	SBI_EXT_IPC_CH_INIT,		/* initialize a channel, returns max msg size */
	SBI_EXT_IPC_SEND,		/* transmit a payload on a channel */
	SBI_EXT_IPC_RECEIVE,		/* fetch / acknowledge a message on a channel */
	SBI_EXT_IPC_STATUS,		/* read per-cluster interrupt status */
};

/* IPC hardware implementations reported by SBI_EXT_IPC_PROBE. */
enum ipc_hw {
	MIV_IHC,	/* IHC block (see "miv-ihc-irq" request below) */
};
42
/**
 * struct mchp_ipc_mbox_info - IPC probe message format
 *
 * @hw_type: IPC implementation available in the hardware
 * @num_channels: number of IPC channels available in the hardware
 *
 * Used to retrieve information on the IPC implementation
 * using the SBI_EXT_IPC_PROBE SBI function id.
 */
struct mchp_ipc_mbox_info {
	enum ipc_hw hw_type;
	u8 num_channels;
};

/**
 * struct mchp_ipc_init - IPC channel init message format
 *
 * @max_msg_size: maximum message size in bytes of a given channel
 *
 * struct used by the SBI_EXT_IPC_CH_INIT SBI function id to get
 * the max message size in bytes of the initialized channel.
 */
struct mchp_ipc_init {
	u16 max_msg_size;
};

/**
 * struct mchp_ipc_status - IPC status message format
 *
 * @status: interrupt status for all channels associated to a cluster
 * @cluster: specifies the cluster instance that originated an irq
 *
 * struct used by the SBI_EXT_IPC_STATUS SBI function id to get
 * the message present and message clear interrupt status for all the
 * channels associated to a cluster.
 */
struct mchp_ipc_status {
	u32 status;
	u8 cluster;
};

/**
 * struct mchp_ipc_sbi_msg - IPC SBI payload message
 *
 * @buf_addr: physical address where the received data should be copied to
 * @size: maximum size (in bytes) that can be stored in the buffer pointed to by @buf_addr
 * @irq_type: mask representing the irq types that triggered an irq
 *
 * struct used by the SBI_EXT_IPC_SEND/SBI_EXT_IPC_RECEIVE SBI function
 * ids to send/receive a message from an associated processor using
 * the IPC.
 */
struct mchp_ipc_sbi_msg {
	u64 buf_addr;
	u16 size;
	u8 irq_type;
};
100
/* Per-cluster (per-hart) state used by the aggregated interrupt handling. */
struct mchp_ipc_cluster_cfg {
	void *buf_base;			/* CPU address of the status message buffer */
	phys_addr_t buf_base_addr;	/* physical address handed to the SBI firmware */
	int irq;			/* aggregated interrupt line for this cluster */
};

/* Driver instance: wraps the mailbox controller plus the probe buffer. */
struct mchp_ipc_sbi_mbox {
	struct device *dev;
	struct mbox_chan *chans;	/* num_chans entries, indexed by channel id */
	struct mchp_ipc_cluster_cfg *cluster_cfg;	/* indexed by hartid */
	void *buf_base;			/* buffer for the SBI_EXT_IPC_PROBE message */
	unsigned long buf_base_addr;	/* physical address of @buf_base */
	struct mbox_controller controller;
	enum ipc_hw hw_type;
};
116
/*
 * Issue a channel-addressed call into the Microchip SBI extension.
 * Returns the SBI return value on success or a negative Linux errno
 * mapped from the SBI error code.
 */
static int mchp_ipc_sbi_chan_send(u32 command, u32 channel, unsigned long address)
{
	struct sbiret result;

	result = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, channel,
			   address, 0, 0, 0, 0);
	if (result.error)
		return sbi_err_map_linux_errno(result.error);

	return result.value;
}
129
/*
 * Issue a channel-less call into the Microchip SBI extension (probe/status).
 * Returns the SBI return value on success or a negative Linux errno
 * mapped from the SBI error code.
 */
static int mchp_ipc_sbi_send(u32 command, unsigned long address)
{
	struct sbiret result;

	result = sbi_ecall(SBI_EXT_MICROCHIP_TECHNOLOGY, command, address,
			   0, 0, 0, 0, 0);
	if (result.error)
		return sbi_err_map_linux_errno(result.error);

	return result.value;
}
142
/* Resolve the driver instance that embeds the given mailbox controller. */
static struct mchp_ipc_sbi_mbox *to_mchp_ipc_mbox(struct mbox_controller *mbox)
{
	return container_of(mbox, struct mchp_ipc_sbi_mbox, controller);
}
147
mchp_ipc_prepare_receive_req(struct mbox_chan * chan)148 static inline void mchp_ipc_prepare_receive_req(struct mbox_chan *chan)
149 {
150 struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
151 struct mchp_ipc_sbi_msg request;
152
153 request.buf_addr = chan_info->msg_buf_rx_addr;
154 request.size = chan_info->max_msg_size;
155 memcpy(chan_info->buf_base_rx, &request, sizeof(struct mchp_ipc_sbi_msg));
156 }
157
mchp_ipc_process_received_data(struct mbox_chan * chan,struct mchp_ipc_msg * ipc_msg)158 static inline void mchp_ipc_process_received_data(struct mbox_chan *chan,
159 struct mchp_ipc_msg *ipc_msg)
160 {
161 struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
162 struct mchp_ipc_sbi_msg sbi_msg;
163
164 memcpy(&sbi_msg, chan_info->buf_base_rx, sizeof(struct mchp_ipc_sbi_msg));
165 ipc_msg->buf = (u32 *)chan_info->msg_buf_rx;
166 ipc_msg->size = sbi_msg.size;
167 }
168
/*
 * Aggregated interrupt handler: one irq line covers all channels of a
 * cluster. Identify the originating hart, ask the firmware for the cluster's
 * interrupt status word, then service every pending channel event —
 * even-numbered status bits are "message present" (deliver to the client),
 * odd-numbered bits are "message clear" (complete the previous transmission).
 */
static irqreturn_t mchp_ipc_cluster_aggr_isr(int irq, void *data)
{
	struct mbox_chan *chan;
	struct mchp_ipc_sbi_chan *chan_info;
	struct mchp_ipc_sbi_mbox *ipc = (struct mchp_ipc_sbi_mbox *)data;
	struct mchp_ipc_msg ipc_msg;
	struct mchp_ipc_status status_msg;
	int ret;
	unsigned long hartid;
	u32 i, chan_index, chan_id;

	/*
	 * Find out the hart that originated the irq.
	 * NOTE(review): cluster_cfg[] is indexed by hartid here but was
	 * allocated with num_online_cpus() entries in probe — confirm hart
	 * IDs cannot exceed the online CPU count on supported platforms.
	 */
	for_each_online_cpu(i) {
		hartid = cpuid_to_hartid_map(i);
		if (irq == ipc->cluster_cfg[hartid].irq)
			break;
	}

	status_msg.cluster = hartid;
	memcpy(ipc->cluster_cfg[hartid].buf_base, &status_msg, sizeof(struct mchp_ipc_status));

	ret = mchp_ipc_sbi_send(SBI_EXT_IPC_STATUS, ipc->cluster_cfg[hartid].buf_base_addr);
	if (ret < 0) {
		dev_err_ratelimited(ipc->dev, "could not get IHC irq status ret=%d\n", ret);
		return IRQ_HANDLED;
	}

	/* The firmware wrote the status word back into the shared buffer. */
	memcpy(&status_msg, ipc->cluster_cfg[hartid].buf_base, sizeof(struct mchp_ipc_status));

	/*
	 * Iterate over each bit set in the IHC interrupt status register (IRQ_STATUS) to identify
	 * the channel(s) that have a message to be processed/acknowledged.
	 * The bits are organized in alternating format, where each pair of bits represents
	 * the status of the message present and message clear interrupts for each cluster/hart
	 * (from hart 0 to hart 5). Each cluster can have up to 5 fixed channels associated.
	 */

	for_each_set_bit(i, (unsigned long *)&status_msg.status, IRQ_STATUS_BITS) {
		/* Find out the destination hart that triggered the interrupt */
		chan_index = i / 2;

		/*
		 * The IP has no loopback channels, so we need to decrement the index when
		 * the target hart has a greater index than our own
		 */
		if (chan_index >= status_msg.cluster)
			chan_index--;

		/*
		 * Calculate the channel id given the hart and channel index. Channel IDs
		 * are unique across all clusters of an IPC, and iterate contiguously
		 * across all clusters.
		 * NOTE(review): for cluster 0 this expression always yields
		 * chan_id 0 regardless of chan_index — verify the intended
		 * formula is not cluster * NUM_CHANS_PER_CLUSTER + chan_index.
		 */
		chan_id = status_msg.cluster * (NUM_CHANS_PER_CLUSTER + chan_index);

		chan = &ipc->chans[chan_id];
		chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;

		if (i % 2 == 0) {
			/* "message present": fetch the payload and hand it to the client. */
			mchp_ipc_prepare_receive_req(chan);
			ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
						     chan_info->buf_base_rx_addr);
			if (ret < 0)
				continue;

			mchp_ipc_process_received_data(chan, &ipc_msg);
			mbox_chan_received_data(&ipc->chans[chan_id], (void *)&ipc_msg);

		} else {
			/* "message clear": acknowledge and complete the pending TX. */
			ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_RECEIVE, chan_id,
						     chan_info->buf_base_rx_addr);
			mbox_chan_txdone(&ipc->chans[chan_id], ret);
		}
	}
	return IRQ_HANDLED;
}
245
mchp_ipc_send_data(struct mbox_chan * chan,void * data)246 static int mchp_ipc_send_data(struct mbox_chan *chan, void *data)
247 {
248 struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
249 const struct mchp_ipc_msg *msg = data;
250 struct mchp_ipc_sbi_msg sbi_payload;
251
252 memcpy(chan_info->msg_buf_tx, msg->buf, msg->size);
253 sbi_payload.buf_addr = chan_info->msg_buf_tx_addr;
254 sbi_payload.size = msg->size;
255 memcpy(chan_info->buf_base_tx, &sbi_payload, sizeof(sbi_payload));
256
257 return mchp_ipc_sbi_chan_send(SBI_EXT_IPC_SEND, chan_info->id, chan_info->buf_base_tx_addr);
258 }
259
mchp_ipc_startup(struct mbox_chan * chan)260 static int mchp_ipc_startup(struct mbox_chan *chan)
261 {
262 struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
263 struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(chan->mbox);
264 struct mchp_ipc_init ch_init_msg;
265 int ret;
266
267 /*
268 * The TX base buffer is used to transmit two types of messages:
269 * - struct mchp_ipc_init to initialize the channel
270 * - struct mchp_ipc_sbi_msg to transmit user data/payload
271 * Ensure the TX buffer size is large enough to accommodate either message type.
272 */
273 size_t max_size = max(sizeof(struct mchp_ipc_init), sizeof(struct mchp_ipc_sbi_msg));
274
275 chan_info->buf_base_tx = kmalloc(max_size, GFP_KERNEL);
276 if (!chan_info->buf_base_tx) {
277 ret = -ENOMEM;
278 goto fail;
279 }
280
281 chan_info->buf_base_tx_addr = __pa(chan_info->buf_base_tx);
282
283 chan_info->buf_base_rx = kmalloc(max_size, GFP_KERNEL);
284 if (!chan_info->buf_base_rx) {
285 ret = -ENOMEM;
286 goto fail_free_buf_base_tx;
287 }
288
289 chan_info->buf_base_rx_addr = __pa(chan_info->buf_base_rx);
290
291 ret = mchp_ipc_sbi_chan_send(SBI_EXT_IPC_CH_INIT, chan_info->id,
292 chan_info->buf_base_tx_addr);
293 if (ret < 0) {
294 dev_err(ipc->dev, "channel %u init failed\n", chan_info->id);
295 goto fail_free_buf_base_rx;
296 }
297
298 memcpy(&ch_init_msg, chan_info->buf_base_tx, sizeof(struct mchp_ipc_init));
299 chan_info->max_msg_size = ch_init_msg.max_msg_size;
300
301 chan_info->msg_buf_tx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
302 if (!chan_info->msg_buf_tx) {
303 ret = -ENOMEM;
304 goto fail_free_buf_base_rx;
305 }
306
307 chan_info->msg_buf_tx_addr = __pa(chan_info->msg_buf_tx);
308
309 chan_info->msg_buf_rx = kmalloc(chan_info->max_msg_size, GFP_KERNEL);
310 if (!chan_info->msg_buf_rx) {
311 ret = -ENOMEM;
312 goto fail_free_buf_msg_tx;
313 }
314
315 chan_info->msg_buf_rx_addr = __pa(chan_info->msg_buf_rx);
316
317 switch (ipc->hw_type) {
318 case MIV_IHC:
319 return 0;
320 default:
321 goto fail_free_buf_msg_rx;
322 }
323
324 if (ret) {
325 dev_err(ipc->dev, "failed to register interrupt(s)\n");
326 goto fail_free_buf_msg_rx;
327 }
328
329 return ret;
330
331 fail_free_buf_msg_rx:
332 kfree(chan_info->msg_buf_rx);
333 fail_free_buf_msg_tx:
334 kfree(chan_info->msg_buf_tx);
335 fail_free_buf_base_rx:
336 kfree(chan_info->buf_base_rx);
337 fail_free_buf_base_tx:
338 kfree(chan_info->buf_base_tx);
339 fail:
340 return ret;
341 }
342
mchp_ipc_shutdown(struct mbox_chan * chan)343 static void mchp_ipc_shutdown(struct mbox_chan *chan)
344 {
345 struct mchp_ipc_sbi_chan *chan_info = (struct mchp_ipc_sbi_chan *)chan->con_priv;
346
347 kfree(chan_info->buf_base_tx);
348 kfree(chan_info->buf_base_rx);
349 kfree(chan_info->msg_buf_tx);
350 kfree(chan_info->msg_buf_rx);
351 }
352
/* Channel operations registered with the mailbox core. */
static const struct mbox_chan_ops mchp_ipc_ops = {
	.startup = mchp_ipc_startup,
	.send_data = mchp_ipc_send_data,
	.shutdown = mchp_ipc_shutdown,
};
358
mchp_ipc_mbox_xlate(struct mbox_controller * controller,const struct of_phandle_args * spec)359 static struct mbox_chan *mchp_ipc_mbox_xlate(struct mbox_controller *controller,
360 const struct of_phandle_args *spec)
361 {
362 struct mchp_ipc_sbi_mbox *ipc = to_mchp_ipc_mbox(controller);
363 unsigned int chan_id = spec->args[0];
364
365 if (chan_id >= ipc->controller.num_chans) {
366 dev_err(ipc->dev, "invalid channel id %d\n", chan_id);
367 return ERR_PTR(-EINVAL);
368 }
369
370 return &ipc->chans[chan_id];
371 }
372
mchp_ipc_get_cluster_aggr_irq(struct mchp_ipc_sbi_mbox * ipc)373 static int mchp_ipc_get_cluster_aggr_irq(struct mchp_ipc_sbi_mbox *ipc)
374 {
375 struct platform_device *pdev = to_platform_device(ipc->dev);
376 char *irq_name;
377 int cpuid, ret;
378 unsigned long hartid;
379 bool irq_found = false;
380
381 for_each_online_cpu(cpuid) {
382 hartid = cpuid_to_hartid_map(cpuid);
383 irq_name = devm_kasprintf(ipc->dev, GFP_KERNEL, "hart-%lu", hartid);
384 ret = platform_get_irq_byname_optional(pdev, irq_name);
385 if (ret <= 0)
386 continue;
387
388 ipc->cluster_cfg[hartid].irq = ret;
389 ret = devm_request_irq(ipc->dev, ipc->cluster_cfg[hartid].irq,
390 mchp_ipc_cluster_aggr_isr, IRQF_SHARED,
391 "miv-ihc-irq", ipc);
392 if (ret)
393 return ret;
394
395 ipc->cluster_cfg[hartid].buf_base = devm_kmalloc(ipc->dev,
396 sizeof(struct mchp_ipc_status),
397 GFP_KERNEL);
398
399 if (!ipc->cluster_cfg[hartid].buf_base)
400 return -ENOMEM;
401
402 ipc->cluster_cfg[hartid].buf_base_addr = __pa(ipc->cluster_cfg[hartid].buf_base);
403
404 irq_found = true;
405 }
406
407 return irq_found;
408 }
409
mchp_ipc_probe(struct platform_device * pdev)410 static int mchp_ipc_probe(struct platform_device *pdev)
411 {
412 struct device *dev = &pdev->dev;
413 struct mchp_ipc_mbox_info ipc_info;
414 struct mchp_ipc_sbi_mbox *ipc;
415 struct mchp_ipc_sbi_chan *priv;
416 bool irq_avail = false;
417 int ret;
418 u32 chan_id;
419
420 ret = sbi_probe_extension(SBI_EXT_MICROCHIP_TECHNOLOGY);
421 if (ret <= 0)
422 return dev_err_probe(dev, ret, "Microchip SBI extension not detected\n");
423
424 ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
425 if (!ipc)
426 return -ENOMEM;
427
428 platform_set_drvdata(pdev, ipc);
429
430 ipc->buf_base = devm_kmalloc(dev, sizeof(struct mchp_ipc_mbox_info), GFP_KERNEL);
431 if (!ipc->buf_base)
432 return -ENOMEM;
433
434 ipc->buf_base_addr = __pa(ipc->buf_base);
435
436 ret = mchp_ipc_sbi_send(SBI_EXT_IPC_PROBE, ipc->buf_base_addr);
437 if (ret < 0)
438 return dev_err_probe(dev, ret, "could not probe IPC SBI service\n");
439
440 memcpy(&ipc_info, ipc->buf_base, sizeof(struct mchp_ipc_mbox_info));
441 ipc->controller.num_chans = ipc_info.num_channels;
442 ipc->hw_type = ipc_info.hw_type;
443
444 ipc->chans = devm_kcalloc(dev, ipc->controller.num_chans, sizeof(*ipc->chans), GFP_KERNEL);
445 if (!ipc->chans)
446 return -ENOMEM;
447
448 ipc->dev = dev;
449 ipc->controller.txdone_irq = true;
450 ipc->controller.dev = ipc->dev;
451 ipc->controller.ops = &mchp_ipc_ops;
452 ipc->controller.chans = ipc->chans;
453 ipc->controller.of_xlate = mchp_ipc_mbox_xlate;
454
455 for (chan_id = 0; chan_id < ipc->controller.num_chans; chan_id++) {
456 priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
457 if (!priv)
458 return -ENOMEM;
459
460 ipc->chans[chan_id].con_priv = priv;
461 priv->id = chan_id;
462 }
463
464 if (ipc->hw_type == MIV_IHC) {
465 ipc->cluster_cfg = devm_kcalloc(dev, num_online_cpus(),
466 sizeof(struct mchp_ipc_cluster_cfg),
467 GFP_KERNEL);
468 if (!ipc->cluster_cfg)
469 return -ENOMEM;
470
471 if (mchp_ipc_get_cluster_aggr_irq(ipc))
472 irq_avail = true;
473 }
474
475 if (!irq_avail)
476 return dev_err_probe(dev, -ENODEV, "missing interrupt property\n");
477
478 ret = devm_mbox_controller_register(dev, &ipc->controller);
479 if (ret)
480 return dev_err_probe(dev, ret,
481 "Inter-Processor communication (IPC) registration failed\n");
482
483 return 0;
484 }
485
/* Devicetree match table: binds against "microchip,sbi-ipc" nodes. */
static const struct of_device_id mchp_ipc_of_match[] = {
	{.compatible = "microchip,sbi-ipc", },
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, mchp_ipc_of_match);
491
/* Platform driver glue; no remove callback is needed — all resources are devm-managed. */
static struct platform_driver mchp_ipc_driver = {
	.driver = {
		.name = "microchip_ipc",
		.of_match_table = mchp_ipc_of_match,
	},
	.probe = mchp_ipc_probe,
};

module_platform_driver(mchp_ipc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Valentina Fernandez <[email protected]>");
MODULE_DESCRIPTION("Microchip Inter-Processor Communication (IPC) driver");
505