1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * ISHTP client logic
4 *
5 * Copyright (c) 2003-2016, Intel Corporation.
6 */
7
8 #include <linux/slab.h>
9 #include <linux/sched.h>
10 #include <linux/wait.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <asm/cacheflush.h>
14 #include "hbm.h"
15 #include "client.h"
16
17 /**
18 * ishtp_read_list_flush() - Flush read queue
19 * @cl: ishtp client instance
20 *
21 * Used to remove all entries from read queue for a client
22 */
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *next;
	unsigned long flags;

	/* Walk the device-wide read list and reclaim only this client's rbs */
	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
			list_del(&rb->list);
			/* free_list_spinlock nests inside read_list_spinlock */
			spin_lock(&cl->free_list_spinlock);
			list_add_tail(&rb->list, &cl->free_rb_list.list);
			spin_unlock(&cl->free_list_spinlock);
		}
	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}
39
40 /**
41 * ishtp_cl_flush_queues() - Flush all queues for a client
42 * @cl: ishtp client instance
43 *
44 * Used to remove all queues for a client. This is called when a client device
45 * needs reset due to error, S3 resume or during module removal
46 *
47 * Return: 0 on success else -EINVAL if device is NULL
48 */
ishtp_cl_flush_queues(struct ishtp_cl * cl)49 int ishtp_cl_flush_queues(struct ishtp_cl *cl)
50 {
51 if (WARN_ON(!cl || !cl->dev))
52 return -EINVAL;
53
54 ishtp_read_list_flush(cl);
55
56 return 0;
57 }
58 EXPORT_SYMBOL(ishtp_cl_flush_queues);
59
60 /**
61 * ishtp_cl_init() - Initialize all fields of a client device
62 * @cl: ishtp client instance
63 * @dev: ishtp device
64 *
65 * Initializes a client device fields: Init spinlocks, init queues etc.
66 * This function is called during new client creation
67 */
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
	/* Start from a fully zeroed structure */
	memset(cl, 0, sizeof(struct ishtp_cl));
	init_waitqueue_head(&cl->wait_ctrl_res);
	/* Locks protecting the per-client queues below */
	spin_lock_init(&cl->free_list_spinlock);
	spin_lock_init(&cl->in_process_spinlock);
	spin_lock_init(&cl->tx_list_spinlock);
	spin_lock_init(&cl->tx_free_list_spinlock);
	spin_lock_init(&cl->fc_spinlock);
	INIT_LIST_HEAD(&cl->link);
	cl->dev = dev;

	/* All per-client queues start out empty */
	INIT_LIST_HEAD(&cl->free_rb_list.list);
	INIT_LIST_HEAD(&cl->tx_list.list);
	INIT_LIST_HEAD(&cl->tx_free_list.list);
	INIT_LIST_HEAD(&cl->in_process_list.list);

	/* Default ring sizes; TX free-slot counter tracks the TX ring size */
	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
	cl->tx_ring_free_size = cl->tx_ring_size;

	/* dma: begin on the IPC path with nothing outstanding (all acked) */
	cl->last_tx_path = CL_TX_PATH_IPC;
	cl->last_dma_acked = 1;
	cl->last_dma_addr = NULL;
	cl->last_ipc_acked = 1;
}
95
96 /**
97 * ishtp_cl_allocate() - allocates client structure and sets it up.
98 * @cl_device: ishtp client device
99 *
100 * Allocate memory for new client device and call to initialize each field.
101 *
102 * Return: The allocated client instance or NULL on failure
103 */
ishtp_cl_allocate(struct ishtp_cl_device * cl_device)104 struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
105 {
106 struct ishtp_cl *cl;
107
108 cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
109 if (!cl)
110 return NULL;
111
112 ishtp_cl_init(cl, cl_device->ishtp_dev);
113 return cl;
114 }
115 EXPORT_SYMBOL(ishtp_cl_allocate);
116
117 /**
118 * ishtp_cl_free() - Frees a client device
119 * @cl: client device instance
120 *
121 * Frees a client device
122 */
void ishtp_cl_free(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags;

	if (!cl)
		return;

	dev = cl->dev;
	if (!dev)
		return;

	/*
	 * Hold cl_list_lock across ring teardown and the kfree() itself so
	 * the client cannot be reached via the device while being freed.
	 */
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	ishtp_cl_free_rx_ring(cl);
	ishtp_cl_free_tx_ring(cl);
	kfree(cl);
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);
142
143 /**
144 * ishtp_cl_link() - Reserve a host id and link the client instance
145 * @cl: client device instance
146 *
147 * This allocates a single bit in the hostmap. This function will make sure
148 * that not many client sessions are opened at the same time. Once allocated
149 * the client device instance is added to the ishtp device in the current
150 * client list
151 *
152 * Return: 0 or error code on failure
153 */
int ishtp_cl_link(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags, flags_cl;
	int id, ret = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);

	/* Bound the number of simultaneously open client handles */
	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
		ret = -EMFILE;
		goto unlock_dev;
	}

	/* Reserve the lowest free host client id from the bitmap */
	id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);

	if (id >= ISHTP_CLIENTS_MAX) {
		spin_unlock_irqrestore(&dev->device_lock, flags);
		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
		return -ENOENT;
	}

	dev->open_handle_count++;
	cl->host_client_id = id;
	/* cl_list_lock nests inside device_lock on this path */
	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		/*
		 * NOTE(review): open_handle_count stays incremented on this
		 * error path; presumably balanced by ishtp_cl_unlink() during
		 * the caller's cleanup — confirm.
		 */
		ret = -ENODEV;
		goto unlock_cl;
	}
	list_add_tail(&cl->link, &dev->cl_list);
	set_bit(id, dev->host_clients_map);
	cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
	spin_unlock_irqrestore(&dev->device_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);
198
199 /**
200 * ishtp_cl_unlink() - remove fw_cl from the client device list
201 * @cl: client device instance
202 *
203 * Remove a previously linked device to a ishtp device
204 */
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	/* don't shout on error exit path */
	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	/* Release the host client id and the open-handle slot */
	spin_lock_irqsave(&dev->device_lock, flags);
	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	spin_unlock_irqrestore(&dev->device_lock, flags);

	/*
	 * This checks that 'cl' is actually linked into device's structure,
	 * before attempting 'list_del'
	 */
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link)
		if (cl->host_client_id == pos->host_client_id) {
			list_del_init(&pos->link);
			break;
		}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);
237
238 /**
239 * ishtp_cl_disconnect() - Send disconnect request to firmware
240 * @cl: client device instance
241 *
242 * Send a disconnect request for a client to firmware.
243 *
244 * Return: 0 if successful disconnect response from the firmware or error
245 * code on failure
246 */
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

	/*
	 * Only proceed when the caller has already moved the client to
	 * DISCONNECTING; any other state is treated as nothing-to-do.
	 * NOTE(review): the log text in this branch reads oddly for the
	 * "not disconnecting" case — confirm against the state machine.
	 */
	if (cl->state != ISHTP_CL_DISCONNECTING) {
		dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
		return 0;
	}

	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
		dev_err(&cl->device->dev, "failed to disconnect.\n");
		return -ENODEV;
	}

	/* Wait for the firmware response (or a device reset) with timeout */
	wait_event_interruptible_timeout(cl->wait_ctrl_res,
			(dev->dev_state != ISHTP_DEV_ENABLED ||
			cl->state == ISHTP_CL_DISCONNECTED),
			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -ENODEV;
	}

	if (cl->state == ISHTP_CL_DISCONNECTED) {
		dev->print_log(dev, "%s() successful\n", __func__);
		return 0;
	}

	/* Timed out without reaching DISCONNECTED */
	return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);
292
293 /**
294 * ishtp_cl_is_other_connecting() - Check other client is connecting
295 * @cl: client device instance
296 *
297 * Checks if other client with the same fw client id is connecting
298 *
299 * Return: true if other client is connected else false
300 */
ishtp_cl_is_other_connecting(struct ishtp_cl * cl)301 static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
302 {
303 struct ishtp_device *dev;
304 struct ishtp_cl *pos;
305 unsigned long flags;
306
307 if (WARN_ON(!cl || !cl->dev))
308 return false;
309
310 dev = cl->dev;
311 spin_lock_irqsave(&dev->cl_list_lock, flags);
312 list_for_each_entry(pos, &dev->cl_list, link) {
313 if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
314 cl->fw_client_id == pos->fw_client_id) {
315 spin_unlock_irqrestore(&dev->cl_list_lock, flags);
316 return true;
317 }
318 }
319 spin_unlock_irqrestore(&dev->cl_list_lock, flags);
320
321 return false;
322 }
323
324 /**
325 * ishtp_cl_connect_to_fw() - Send connect request to firmware
326 * @cl: client device instance
327 *
328 * Send a connect request to the firmware and wait for firmware response.
329 * If there is successful connection response from the firmware, change
330 * client state to ISHTP_CL_CONNECTED, and bind client to related
331 * firmware client_id.
332 *
333 * Return: 0 for success and error code on failure
334 */
static int ishtp_cl_connect_to_fw(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	/* Refuse if another client is mid-connect to the same fw client */
	if (ishtp_cl_is_other_connecting(cl)) {
		dev->print_log(dev, "%s() Busy\n", __func__);
		return -EBUSY;
	}

	if (ishtp_hbm_cl_connect_req(dev, cl)) {
		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
		return -ENODEV;
	}

	/*
	 * Wait for a firmware verdict (CONNECTED or DISCONNECTED) or a
	 * device reset; the wait result itself is discarded — outcome is
	 * judged purely from dev_state and cl->state below.
	 */
	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
				(dev->dev_state == ISHTP_DEV_ENABLED &&
				(cl->state == ISHTP_CL_CONNECTED ||
				 cl->state == ISHTP_CL_DISCONNECTED)),
				ishtp_secs_to_jiffies(
					ISHTP_CL_CONNECT_TIMEOUT));
	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -EFAULT;
	}

	if (cl->state != ISHTP_CL_CONNECTED) {
		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
			       __func__);
		return -EFAULT;
	}

	/* Firmware-reported status for the connect request */
	rets = cl->status;
	if (rets) {
		dev->print_log(dev, "%s() Invalid status\n", __func__);
		return rets;
	}

	/* Bind to the bus; undo the fw connection if binding fails */
	rets = ishtp_cl_device_bind(cl);
	if (rets) {
		dev->print_log(dev, "%s() Bind error\n", __func__);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	return rets;
}
392
393 /**
394 * ishtp_cl_connect() - Build connection with firmware
395 * @cl: client device instance
396 *
397 * Call ishtp_cl_connect_to_fw() to connect and bind to firmware. If successful,
398 * allocate RX and TX ring buffers, and start flow control with firmware to
399 * start communication.
400 *
401 * Return: 0 if there is successful connection to the firmware, allocate
402 * ring buffers.
403 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int rets;

	if (!cl || !cl->dev)
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

	rets = ishtp_cl_connect_to_fw(cl);
	if (rets) {
		dev->print_log(dev, "%s() Connect to fw failed\n", __func__);
		return rets;
	}

	rets = ishtp_cl_alloc_rx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_tx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		/* unwind in reverse order: free RX ring, then disconnect */
		ishtp_cl_free_rx_ring(cl);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	/*
	 * Upon successful connection and allocation, start flow-control.
	 */
	rets = ishtp_cl_read_start(cl);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);
447
448 /**
449 * ishtp_cl_establish_connection() - Establish connection with the firmware
450 * @cl: client device instance
451 * @uuid: uuid of the client to search
452 * @tx_size: TX ring buffer size
453 * @rx_size: RX ring buffer size
454 * @reset: true if called for reset connection, otherwise for first connection
455 *
456 * This is a helper function for client driver to build connection with firmware.
457 * If it's first time connecting to the firmware, set reset to false, this
458 * function will link client to bus, find client id and send connect request to
459 * the firmware.
460 *
461 * If it's called for reset handler where client lost connection after
462 * firmware reset, set reset to true, this function will reinit client state and
463 * establish connection again. In this case, this function reuses current client
464 * structure and ring buffers to avoid allocation failure and memory fragments.
465 *
466 * Return: 0 for successful connection with the firmware,
467 * or error code on failure
468 */
int ishtp_cl_establish_connection(struct ishtp_cl *cl, const guid_t *uuid,
				  int tx_size, int rx_size, bool reset)
{
	struct ishtp_device *dev;
	struct ishtp_fw_client *fw_client;
	int rets;

	if (!cl || !cl->dev)
		return -ENODEV;

	dev = cl->dev;

	ishtp_set_connection_state(cl, ISHTP_CL_INITIALIZING);

	/* reinit ishtp_cl structure if call for reset */
	if (reset) {
		/* Identity and flow-control credits start from scratch */
		cl->host_client_id = 0;
		cl->fw_client_id = 0;
		cl->ishtp_flow_ctrl_creds = 0;
		cl->out_flow_ctrl_creds = 0;

		/* Same DMA/IPC defaults as ishtp_cl_init() */
		cl->last_tx_path = CL_TX_PATH_IPC;
		cl->last_dma_acked = 1;
		cl->last_dma_addr = NULL;
		cl->last_ipc_acked = 1;

		cl->sending = 0;
		cl->err_send_msg = 0;
		cl->err_send_fc = 0;

		/* Clear statistics counters */
		cl->send_msg_cnt_ipc = 0;
		cl->send_msg_cnt_dma = 0;
		cl->recv_msg_cnt_ipc = 0;
		cl->recv_msg_cnt_dma = 0;
		cl->recv_msg_num_frags = 0;
		cl->ishtp_flow_ctrl_cnt = 0;
		cl->out_flow_ctrl_cnt = 0;
	}

	/* link to bus */
	rets = ishtp_cl_link(cl);
	if (rets) {
		dev->print_log(dev, "%s() ishtp_cl_link failed\n", __func__);
		return rets;
	}

	/* find firmware client */
	fw_client = ishtp_fw_cl_get_client(dev, uuid);
	if (!fw_client) {
		/*
		 * NOTE(review): the client stays linked on this path;
		 * presumably the caller cleans up via
		 * ishtp_cl_destroy_connection() — confirm.
		 */
		dev->print_log(dev,
			       "%s() ish client uuid not found\n", __func__);
		return -ENOENT;
	}

	ishtp_set_tx_ring_size(cl, tx_size);
	ishtp_set_rx_ring_size(cl, rx_size);

	ishtp_cl_set_fw_client_id(cl, ishtp_get_fw_client_id(fw_client));

	ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

	/*
	 * For reset case, not allocate tx/rx ring buffer which are already
	 * done in ishtp_cl_connect() during first connection.
	 */
	if (reset) {
		rets = ishtp_cl_connect_to_fw(cl);
		if (!rets)
			rets = ishtp_cl_read_start(cl);
		else
			dev->print_log(dev,
				"%s() connect to fw failed\n", __func__);
	} else {
		rets = ishtp_cl_connect(cl);
	}

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_establish_connection);
548
549 /**
550 * ishtp_cl_destroy_connection() - Disconnect with the firmware
551 * @cl: client device instance
552 * @reset: true if called for firmware reset, false for normal disconnection
553 *
554 * This is a helper function for client driver to disconnect with firmware,
555 * unlink to bus and flush message queue.
556 */
void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset)
{
	if (!cl)
		return;

	if (reset) {
		/*
		 * After a firmware reset the link is already gone; simply
		 * marking the client DISCONNECTED is sufficient.
		 */
		ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTED);
	} else if (cl->state != ISHTP_CL_DISCONNECTED) {
		/* Normal teardown: ask the firmware to disconnect */
		ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING);
		ishtp_cl_disconnect(cl);
	}

	ishtp_cl_unlink(cl);
	ishtp_cl_flush_queues(cl);
}
EXPORT_SYMBOL(ishtp_cl_destroy_connection);
579
580 /**
581 * ishtp_cl_read_start() - Prepare to read client message
582 * @cl: client device instance
583 *
584 * Get a free buffer from pool of free read buffers and add to read buffer
585 * pool to add contents. Send a flow control request to firmware to be able
586 * send next message.
587 *
588 * Return: 0 if successful or error code on failure
589 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl_rb *rb;
	int rets;
	int i;
	unsigned long flags;
	unsigned long dev_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return -ENODEV;

	/* Validate that the bound firmware client still exists */
	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (i < 0) {
		dev_err(&cl->device->dev, "no such fw client %d\n",
			cl->fw_client_id);
		return -ENODEV;
	}

	/* The current rb is the head of the free rb list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	if (list_empty(&cl->free_rb_list.list)) {
		dev_warn(&cl->device->dev,
			 "[ishtp-ish] Rx buffers pool is empty\n");
		rets = -ENOMEM;
		rb = NULL;
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
		goto out;
	}
	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
	list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	rb->cl = cl;
	rb->buf_idx = 0;

	INIT_LIST_HEAD(&rb->list);
	rets = 0;

	/*
	 * This must be BEFORE sending flow control -
	 * response in ISR may come too fast...
	 */
	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_add_tail(&rb->list, &dev->read_list.list);
	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
		rets = -ENODEV;
		goto out;
	}
out:
	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
	if (rets && rb) {
		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
		list_del(&rb->list);
		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}
	return rets;
}
661
662 /**
663 * ishtp_cl_send() - Send a message to firmware
664 * @cl: client device instance
665 * @buf: message buffer
666 * @length: length of message
667 *
668 * If the client is correct state to send message, this function gets a buffer
669 * from tx ring buffers, copy the message data and call to send the message
670 * using ishtp_cl_send_msg()
671 *
672 * Return: 0 if successful or error code on failure
673 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
	struct ishtp_device *dev;
	int id;
	struct ishtp_cl_tx_ring *cl_msg;
	int have_msg_to_send = 0;
	unsigned long tx_flags, tx_free_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED) {
		++cl->err_send_msg;
		return -EPIPE;
	}

	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		++cl->err_send_msg;
		return -ENODEV;
	}

	/* Check if we have fw client device */
	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (id < 0) {
		++cl->err_send_msg;
		return -ENOENT;
	}

	/* Reject messages larger than the fw client's advertised maximum */
	if (length > dev->fw_clients[id].props.max_msg_length) {
		++cl->err_send_msg;
		return -EMSGSIZE;
	}

	/* No free bufs */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	if (list_empty(&cl->tx_free_list.list)) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		++cl->err_send_msg;
		return -ENOMEM;
	}

	cl_msg = list_first_entry(&cl->tx_free_list.list,
				  struct ishtp_cl_tx_ring, list);
	if (!cl_msg->send_buf.data) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		return -EIO;
		/* Should not happen, as free list is pre-allocated */
	}
	/*
	 * This is safe, as 'length' is already checked for not exceeding
	 * max ISHTP message size per client
	 */
	list_del_init(&cl_msg->list);
	--cl->tx_ring_free_size;

	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	memcpy(cl_msg->send_buf.data, buf, length);
	cl_msg->send_buf.size = length;
	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	/* Remember whether TX was already busy before queueing this one */
	have_msg_to_send = !list_empty(&cl->tx_list.list);
	list_add_tail(&cl_msg->list, &cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	/* Kick transmission only if the queue was idle and we have credits */
	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
		ishtp_cl_send_msg(dev, cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_send);
747
748 /**
749 * ishtp_cl_read_complete() - read complete
750 * @rb: Pointer to client request block
751 *
752 * If the message is completely received call ishtp_cl_bus_rx_event()
753 * to process message
754 */
ishtp_cl_read_complete(struct ishtp_cl_rb * rb)755 static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
756 {
757 unsigned long flags;
758 int schedule_work_flag = 0;
759 struct ishtp_cl *cl = rb->cl;
760
761 spin_lock_irqsave(&cl->in_process_spinlock, flags);
762 /*
763 * if in-process list is empty, then need to schedule
764 * the processing thread
765 */
766 schedule_work_flag = list_empty(&cl->in_process_list.list);
767 list_add_tail(&rb->list, &cl->in_process_list.list);
768 spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
769
770 if (schedule_work_flag)
771 ishtp_cl_bus_rx_event(cl->device);
772 }
773
774 /**
775 * ipc_tx_send() - IPC tx send function
776 * @prm: Pointer to client device instance
777 *
778 * Send message over IPC. Message will be split into fragments
779 * if message size is bigger than IPC FIFO size, and all
780 * fragments will be sent one by one.
781 */
static void ipc_tx_send(void *prm)
{
	struct ishtp_cl *cl = prm;
	struct ishtp_cl_tx_ring *cl_msg;
	size_t rem;
	struct ishtp_device *dev = (cl ? cl->dev : NULL);
	struct ishtp_msg_hdr ishtp_hdr;
	unsigned long tx_flags, tx_free_flags;
	unsigned char *pmsg;

	if (!dev)
		return;

	/*
	 * Other conditions if some critical error has
	 * occurred before this callback is called
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return;

	if (cl->state != ISHTP_CL_CONNECTED)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	/* A fresh send needs exactly one flow-control credit */
	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	/* Starting a new message: consume the credit and mark TX active */
	if (!cl->sending) {
		--cl->ishtp_flow_ctrl_creds;
		cl->last_ipc_acked = 0;
		cl->last_tx_path = CL_TX_PATH_IPC;
		cl->sending = 1;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);
	/* Bytes of this message still to transmit (resume at tx_offs) */
	rem = cl_msg->send_buf.size - cl->tx_offs;

	while (rem > 0) {
		ishtp_hdr.host_addr = cl->host_client_id;
		ishtp_hdr.fw_addr = cl->fw_client_id;
		ishtp_hdr.reserved = 0;
		pmsg = cl_msg->send_buf.data + cl->tx_offs;

		if (rem <= dev->mtu) {
			/* Last fragment or only one packet */
			ishtp_hdr.length = rem;
			ishtp_hdr.msg_complete = 1;
			/* Submit to IPC queue with no callback */
			ishtp_write_message(dev, &ishtp_hdr, pmsg);
			cl->tx_offs = 0;
			cl->sending = 0;

			break;
		} else {
			/* Send ipc fragment */
			ishtp_hdr.length = dev->mtu;
			ishtp_hdr.msg_complete = 0;
			/* All fragments submitted to IPC queue with no callback */
			ishtp_write_message(dev, &ishtp_hdr, pmsg);
			cl->tx_offs += dev->mtu;
			rem = cl_msg->send_buf.size - cl->tx_offs;
		}
	}

	/* Message fully submitted: move its buffer back to the free ring */
	list_del_init(&cl_msg->list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	++cl->tx_ring_free_size;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
			       tx_free_flags);
}
863
864 /**
865 * ishtp_cl_send_msg_ipc() -Send message using IPC
866 * @dev: ISHTP device instance
867 * @cl: Pointer to client device instance
868 *
869 * Send message over IPC not using DMA
870 */
ishtp_cl_send_msg_ipc(struct ishtp_device * dev,struct ishtp_cl * cl)871 static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
872 struct ishtp_cl *cl)
873 {
874 /* If last DMA message wasn't acked yet, leave this one in Tx queue */
875 if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
876 return;
877
878 cl->tx_offs = 0;
879 ipc_tx_send(cl);
880 ++cl->send_msg_cnt_ipc;
881 }
882
883 /**
884 * ishtp_cl_send_msg_dma() -Send message using DMA
885 * @dev: ISHTP device instance
886 * @cl: Pointer to client device instance
887 *
888 * Send message using DMA
889 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct dma_xfer_hbm dma_xfer;
	unsigned char *msg_addr;
	int off;
	struct ishtp_cl_tx_ring *cl_msg;
	unsigned long tx_flags, tx_free_flags;

	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);

	/* Reserve space in the shared host DMA TX buffer */
	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
	if (!msg_addr) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		/* No DMA space: fall back to IPC when path is negotiable */
		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
			ishtp_cl_send_msg_ipc(dev, cl);
		return;
	}

	list_del_init(&cl_msg->list);	/* Must be before write */
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	/* Consume a flow-control credit and record the in-flight DMA */
	--cl->ishtp_flow_ctrl_creds;
	cl->last_dma_acked = 0;
	cl->last_dma_addr = msg_addr;
	cl->last_tx_path = CL_TX_PATH_DMA;

	/* write msg to dma buf */
	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

	/*
	 * if current fw don't support cache snooping, driver have to
	 * flush the cache manually.
	 */
	if (dev->ops->dma_no_cache_snooping &&
	    dev->ops->dma_no_cache_snooping(dev))
		clflush_cache_range(msg_addr, cl_msg->send_buf.size);

	/* send dma_xfer hbm msg */
	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
	dma_xfer.hbm = DMA_XFER;
	dma_xfer.fw_client_id = cl->fw_client_id;
	dma_xfer.host_client_id = cl->host_client_id;
	dma_xfer.reserved = 0;
	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
	dma_xfer.msg_length = cl_msg->send_buf.size;
	dma_xfer.reserved2 = 0;
	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
	/* Return the TX ring buffer to the free pool */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	++cl->tx_ring_free_size;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	++cl->send_msg_cnt_dma;
}
957
958 /**
959 * ishtp_cl_send_msg() -Send message using DMA or IPC
960 * @dev: ISHTP device instance
961 * @cl: Pointer to client device instance
962 *
963 * Send message using DMA or IPC based on transfer_path
964 */
ishtp_cl_send_msg(struct ishtp_device * dev,struct ishtp_cl * cl)965 void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
966 {
967 if (dev->transfer_path == CL_TX_PATH_DMA)
968 ishtp_cl_send_msg_dma(dev, cl);
969 else
970 ishtp_cl_send_msg_ipc(dev, cl);
971 }
972
973 /**
974 * recv_ishtp_cl_msg() -Receive client message
975 * @dev: ISHTP device instance
976 * @ishtp_hdr: Pointer to message header
977 *
978 * Receive and dispatch ISHTP client messages. This function executes in ISR
979 * or work queue context
980 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
		       struct ishtp_msg_hdr *ishtp_hdr)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;

	if (ishtp_hdr->reserved) {
		dev_err(dev->devc, "corrupted message header.\n");
		goto eoi;
	}

	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
		dev_err(dev->devc,
			"ISHTP message length in hdr exceeds IPC MTU\n");
		goto eoi;
	}

	spin_lock_irqsave(&dev->read_list_spinlock, flags);
	/* Find the connected client whose addresses match the header */
	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
				cl->fw_client_id == ishtp_hdr->fw_addr) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/* If no Rx buffer is allocated, disband the rb */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"Rx buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If message buffer overflown (exceeds max. client msg
		 * size, drop message and return to free buffer.
		 * Do we need to disconnect such a client? (We don't send
		 * back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, ishtp_hdr->length,
				rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		/* Append this fragment's payload at the current offset */
		buffer = rb->buffer.data + rb->buf_idx;
		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

		rb->buf_idx += ishtp_hdr->length;
		if (ishtp_hdr->msg_complete) {
			/* Last fragment in message - it's complete */
			cl->status = 0;
			list_del(&rb->list);
			complete_rb = rb;

			--cl->out_flow_ctrl_creds;
			/*
			 * the whole msg arrived, send a new FC, and add a new
			 * rb buffer for the next coming msg
			 */
			spin_lock(&cl->free_list_spinlock);

			if (!list_empty(&cl->free_rb_list.list)) {
				new_rb = list_entry(cl->free_rb_list.list.next,
					struct ishtp_cl_rb, list);
				list_del_init(&new_rb->list);
				spin_unlock(&cl->free_list_spinlock);
				new_rb->cl = cl;
				new_rb->buf_idx = 0;
				INIT_LIST_HEAD(&new_rb->list);
				list_add_tail(&new_rb->list,
					      &dev->read_list.list);

				ishtp_hbm_cl_flow_control_req(dev, cl);
			} else {
				spin_unlock(&cl->free_list_spinlock);
			}
		}
		/* One more fragment in message (even if this was last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

		dev_err(dev->devc, "Dropped Rx msg - no request\n");
		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
		goto eoi;
	}

	/* Completion work is done outside the read-list lock */
	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_ipc;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}
1101
1102 /**
 * recv_ishtp_cl_msg_dma() - Receive client message
1104 * @dev: ISHTP device instance
1105 * @msg: message pointer
1106 * @hbm: hbm buffer
1107 *
1108 * Receive and dispatch ISHTP client messages using DMA. This function executes
1109 * in ISR or work queue context
1110 */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
			   struct dma_xfer_hbm *hbm)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	/*
	 * Points at the matched rb's data once a recipient is found;
	 * doubles as the "somebody wanted this message" flag below.
	 */
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;

	/* May run in ISR context, hence irqsave */
	spin_lock_irqsave(&dev->read_list_spinlock, flags);

	/* Find a pending read buffer of the addressed, connected client */
	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
				cl->fw_client_id == hbm->fw_client_id) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/*
		 * If no Rx buffer is allocated, disband the rb
		 */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			/*
			 * NOTE(review): rb is unlinked and freed after
			 * read_list_spinlock has been dropped — confirm no
			 * concurrent context can walk/modify read_list here.
			 */
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"response buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If message buffer overflown (exceeds max. client msg
		 * size), drop message and return rb to free buffer.
		 * Do we need to disconnect such a client? (We don't send
		 * back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < hbm->msg_length) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, hbm->msg_length, rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data;

		/*
		 * if current fw don't support cache snooping, driver have to
		 * flush the cache manually.
		 */
		if (dev->ops->dma_no_cache_snooping &&
			dev->ops->dma_no_cache_snooping(dev))
			clflush_cache_range(msg, hbm->msg_length);

		/* DMA transfers deliver the whole message at once */
		memcpy(buffer, msg, hbm->msg_length);
		rb->buf_idx = hbm->msg_length;

		/* Last fragment in message - it's complete */
		cl->status = 0;
		list_del(&rb->list);
		complete_rb = rb;

		/* This message consumed one outbound flow-control credit */
		--cl->out_flow_ctrl_creds;
		/*
		 * the whole msg arrived, send a new FC, and add a new
		 * rb buffer for the next coming msg
		 */
		spin_lock(&cl->free_list_spinlock);

		if (!list_empty(&cl->free_rb_list.list)) {
			new_rb = list_entry(cl->free_rb_list.list.next,
				struct ishtp_cl_rb, list);
			list_del_init(&new_rb->list);
			/* Drop the free-list lock before re-queueing on read_list */
			spin_unlock(&cl->free_list_spinlock);
			new_rb->cl = cl;
			new_rb->buf_idx = 0;
			INIT_LIST_HEAD(&new_rb->list);
			list_add_tail(&new_rb->list,
				&dev->read_list.list);

			ishtp_hbm_cl_flow_control_req(dev, cl);
		} else {
			/* No free rb: no FC is sent until one is recycled */
			spin_unlock(&cl->free_list_spinlock);
		}

		/* One more fragment in message (this is always last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
		goto eoi;
	}

	/* Complete the read with all locks released */
	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_dma;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}
1227
/**
 * ishtp_get_client_data() - Get client-private data pointer
 * @cl: ISHTP client instance
 *
 * Return: the opaque pointer previously stored via ishtp_set_client_data()
 */
void *ishtp_get_client_data(struct ishtp_cl *cl)
{
	return cl->client_data;
}
EXPORT_SYMBOL(ishtp_get_client_data);
1233
/**
 * ishtp_set_client_data() - Attach driver-private data to a client
 * @cl: ISHTP client instance
 * @data: opaque pointer to store; retrieved via ishtp_get_client_data()
 */
void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
{
	cl->client_data = data;
}
EXPORT_SYMBOL(ishtp_set_client_data);
1239
/**
 * ishtp_get_ishtp_device() - Get the ISHTP device a client belongs to
 * @cl: ISHTP client instance
 *
 * Return: the struct ishtp_device the client is bound to
 */
struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
{
	return cl->dev;
}
EXPORT_SYMBOL(ishtp_get_ishtp_device);
1245
/**
 * ishtp_set_tx_ring_size() - Set the client's Tx ring size
 * @cl: ISHTP client instance
 * @size: requested Tx ring size (presumably a buffer count — confirm
 *        against the ring allocation code)
 */
void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->tx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_tx_ring_size);
1251
/**
 * ishtp_set_rx_ring_size() - Set the client's Rx ring size
 * @cl: ISHTP client instance
 * @size: requested Rx ring size (presumably a buffer count — confirm
 *        against the ring allocation code)
 */
void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->rx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_rx_ring_size);
1257
/**
 * ishtp_set_connection_state() - Set client connection state
 * @cl: ISHTP client instance
 * @state: new connection state (e.g. ISHTP_CL_CONNECTED; the Rx paths
 *         above only deliver messages to clients in that state)
 */
void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
{
	cl->state = state;
}
EXPORT_SYMBOL(ishtp_set_connection_state);
1263
/**
 * ishtp_cl_set_fw_client_id() - Set the firmware client id for a client
 * @cl: ISHTP client instance
 * @fw_client_id: firmware-side client id; incoming messages are matched
 *                against this value in the Rx dispatch paths above
 */
void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
{
	cl->fw_client_id = fw_client_id;
}
EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
1269