// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"

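/*
 * DMA engine completion callback for a finished TX transfer. Syncs the
 * transmit buffer back for the CPU, advances the circular buffer by the
 * number of bytes just sent, and either starts the next transfer or
 * falls back to interrupt-driven TX by enabling THRI.
 */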
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tport = &p->port.state->port;
	unsigned long	flags;
	int		ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	uart_port_lock_irqsave(&p->port, &flags);

	dma->tx_running = 0;

	uart_xmit_advance(&p->port, dma->tx_size);

	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	ret = serial8250_tx_dma(p);
	if (ret || !dma->tx_running)
		serial8250_set_THRI(p);

	uart_port_unlock_irqrestore(&p->port, flags);
}

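/*
 * Complete an RX DMA transfer: copy whatever the DMA engine has written
 * into the bounce buffer (rx_size minus the reported residue) over to the
 * tty flip buffer and push it to the line discipline. Must be called with
 * the port lock held.
 */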
static void __dma_rx_complete(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tty_port = &p->port.state->port;
	struct dma_tx_state	state;
	enum dma_status		dma_status;
	int			count;

	/*
	 * A new DMA Rx can be started during the completion handler before
	 * it could acquire the port's lock, and it might still be ongoing.
	 * Don't do anything in such a case.
	 */
	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (dma_status == DMA_IN_PROGRESS)
		return;

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;
	dma->rx_running = 0;

	tty_flip_buffer_push(tty_port);
}

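/* DMA engine completion callback for a finished (or errored) RX transfer. */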
static void dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	unsigned long flags;

	uart_port_lock_irqsave(&p->port, &flags);
	if (dma->rx_running)
		__dma_rx_complete(p);

	/*
	 * Cannot be combined with the previous check because __dma_rx_complete()
	 * changes dma->rx_running.
	 */
	if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
		p->dma->rx_dma(p);
	uart_port_unlock_irqrestore(&p->port, flags);
}

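/**
 * serial8250_tx_dma - start transmitting the xmit buffer via DMA
 * @p: uart_8250_port to transmit on
 *
 * Maps the pending bytes in the kfifo into a scatterlist and submits them
 * to the TX channel. An x_char, if pending, is sent out directly through
 * the FIFO, pausing an in-flight DMA transfer if necessary.
 *
 * Returns 0 on success (including when there is nothing to send), or a
 * negative error code if the descriptor could not be prepared; in that
 * case tx_err is set so the caller can fall back to PIO.
 */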
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct tty_port			*tport = &p->port.state->port;
	struct dma_async_tx_descriptor	*desc;
	struct uart_port		*up = &p->port;
	struct scatterlist		*sg;
	struct scatterlist		sgl[2];
	int i;
	int ret;

	if (dma->tx_running) {
		if (up->x_char) {
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || kfifo_is_empty(&tport->xmit_fifo)) {
		/* We have been called from __dma_tx_complete() */
		return 0;
	}

	serial8250_do_prepare_tx_dma(p);

	sg_init_table(sgl, ARRAY_SIZE(sgl));

	ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, sgl, ARRAY_SIZE(sgl),
					   UART_XMIT_SIZE, dma->tx_addr);

	dma->tx_size = 0;

	for_each_sg(sgl, sg, ret, i)
		dma->tx_size += sg_dma_len(sg);

	desc = dmaengine_prep_slave_sg(dma->txchan, sgl, ret,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	serial8250_clear_THRI(p);
	dma->tx_err = 0;

	return 0;
err:
	dma->tx_err = 1;
	return ret;
}

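/**
 * serial8250_tx_dma_flush - abort an in-flight TX DMA transfer
 * @p: uart_8250_port to flush
 *
 * Called after the serial core has reset the kfifo; zeroes tx_size so the
 * completion handler does not advance (and underflow) the emptied fifo.
 */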
void serial8250_tx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma->tx_running)
		return;

	/*
	 * kfifo_reset() has been called by the serial core, avoid
	 * advancing and underflowing in __dma_tx_complete().
	 */
	dma->tx_size = 0;

	dmaengine_terminate_async(dma->txchan);
}

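/**
 * serial8250_rx_dma - start an RX DMA transfer into the bounce buffer
 * @p: uart_8250_port to receive on
 *
 * Does nothing if a transfer is already running. Returns 0 on success or
 * -EBUSY if the slave descriptor could not be prepared.
 */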
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;

	if (dma->rx_running)
		return 0;

	serial8250_do_prepare_rx_dma(p);

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}

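/**
 * serial8250_rx_dma_flush - complete a running RX DMA transfer early
 * @p: uart_8250_port to flush
 *
 * Pauses the channel so the residue is stable, pushes the received bytes
 * to the tty layer, then terminates the transfer.
 */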
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);

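/**
 * serial8250_request_dma - acquire and configure DMA channels for a port
 * @p: uart_8250_port to set up
 *
 * Requests RX and TX slave channels, verifies they provide the controls
 * this driver depends on (pause/terminate and better-than-descriptor
 * residue granularity for RX, terminate for TX), and allocates the RX
 * bounce buffer plus the TX mapping of the xmit buffer.
 *
 * Returns 0 on success or a negative error code; on failure all acquired
 * resources are released again.
 */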
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t		mask;
	struct dma_slave_caps	caps;
	int			ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/*
	 * 8250 rx dma requires the dmaengine driver to support
	 * pause/terminate and finer-than-descriptor residue reporting.
	 */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires the dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->port.xmit_buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);

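/**
 * serial8250_release_dma - tear down the DMA state of a port
 * @p: uart_8250_port to tear down
 *
 * Terminates any in-flight transfers, frees the RX bounce buffer, unmaps
 * the TX buffer and releases both channels acquired by
 * serial8250_request_dma(). Safe to call when the port has no DMA state.
 */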
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);