1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) Maxime Coquelin 2015
4 * Copyright (C) STMicroelectronics SA 2017
5 * Authors: Maxime Coquelin <[email protected]>
6 * Gerald Baeza <[email protected]>
7 * Erwan Le Ray <[email protected]>
8 *
9 * Inspired by st-asc.c from STMicroelectronics (c)
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/console.h>
15 #include <linux/delay.h>
16 #include <linux/dma-direction.h>
17 #include <linux/dmaengine.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/irq.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_platform.h>
25 #include <linux/pinctrl/consumer.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/pm_wakeirq.h>
29 #include <linux/serial_core.h>
30 #include <linux/serial.h>
31 #include <linux/spinlock.h>
32 #include <linux/sysrq.h>
33 #include <linux/tty_flip.h>
34 #include <linux/tty.h>
35
36 #include "serial_mctrl_gpio.h"
37 #include "stm32-usart.h"
38
39
40 /* Register offsets */
41 static struct stm32_usart_info __maybe_unused stm32f4_info = {
42 .ofs = {
43 .isr = 0x00,
44 .rdr = 0x04,
45 .tdr = 0x04,
46 .brr = 0x08,
47 .cr1 = 0x0c,
48 .cr2 = 0x10,
49 .cr3 = 0x14,
50 .gtpr = 0x18,
51 .rtor = UNDEF_REG,
52 .rqr = UNDEF_REG,
53 .icr = UNDEF_REG,
54 .presc = UNDEF_REG,
55 .hwcfgr1 = UNDEF_REG,
56 },
57 .cfg = {
58 .uart_enable_bit = 13,
59 .has_7bits_data = false,
60 }
61 };
62
63 static struct stm32_usart_info __maybe_unused stm32f7_info = {
64 .ofs = {
65 .cr1 = 0x00,
66 .cr2 = 0x04,
67 .cr3 = 0x08,
68 .brr = 0x0c,
69 .gtpr = 0x10,
70 .rtor = 0x14,
71 .rqr = 0x18,
72 .isr = 0x1c,
73 .icr = 0x20,
74 .rdr = 0x24,
75 .tdr = 0x28,
76 .presc = UNDEF_REG,
77 .hwcfgr1 = UNDEF_REG,
78 },
79 .cfg = {
80 .uart_enable_bit = 0,
81 .has_7bits_data = true,
82 .has_swap = true,
83 }
84 };
85
86 static struct stm32_usart_info __maybe_unused stm32h7_info = {
87 .ofs = {
88 .cr1 = 0x00,
89 .cr2 = 0x04,
90 .cr3 = 0x08,
91 .brr = 0x0c,
92 .gtpr = 0x10,
93 .rtor = 0x14,
94 .rqr = 0x18,
95 .isr = 0x1c,
96 .icr = 0x20,
97 .rdr = 0x24,
98 .tdr = 0x28,
99 .presc = 0x2c,
100 .hwcfgr1 = 0x3f0,
101 },
102 .cfg = {
103 .uart_enable_bit = 0,
104 .has_7bits_data = true,
105 .has_swap = true,
106 .has_wakeup = true,
107 .has_fifo = true,
108 }
109 };
110
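/*
 * Registers that do not exist on a given variant are flagged with UNDEF_REG
 * in the tables above; code paths test e.g. "ofs->icr != UNDEF_REG" or
 * "ofs->rqr != UNDEF_REG" before touching them, so stm32f4 falls back to the
 * legacy clear-on-read sequences while stm32f7/stm32h7 use ICR/RQR directly.
 */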
111 static void stm32_usart_stop_tx(struct uart_port *port);
112 static void stm32_usart_transmit_chars(struct uart_port *port);
113 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
114
115 static inline struct stm32_port *to_stm32_port(struct uart_port *port)
116 {
117 return container_of(port, struct stm32_port, port);
118 }
119
120 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
121 {
122 u32 val;
123
124 val = readl_relaxed(port->membase + reg);
125 val |= bits;
126 writel_relaxed(val, port->membase + reg);
127 }
128
129 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
130 {
131 u32 val;
132
133 val = readl_relaxed(port->membase + reg);
134 val &= ~bits;
135 writel_relaxed(val, port->membase + reg);
136 }
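/*
 * These read-modify-write helpers use the relaxed MMIO accessors and provide
 * no locking of their own: callers are expected to serialize access to the
 * control registers, typically under the uart_port lock.
 */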
137
138 static unsigned int stm32_usart_tx_empty(struct uart_port *port)
139 {
140 struct stm32_port *stm32_port = to_stm32_port(port);
141 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
142
143 if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
144 return TIOCSER_TEMT;
145
146 return 0;
147 }
148
149 static void stm32_usart_rs485_rts_enable(struct uart_port *port)
150 {
151 struct stm32_port *stm32_port = to_stm32_port(port);
152 struct serial_rs485 *rs485conf = &port->rs485;
153
154 if (stm32_port->hw_flow_control ||
155 !(rs485conf->flags & SER_RS485_ENABLED))
156 return;
157
158 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
159 mctrl_gpio_set(stm32_port->gpios,
160 stm32_port->port.mctrl | TIOCM_RTS);
161 } else {
162 mctrl_gpio_set(stm32_port->gpios,
163 stm32_port->port.mctrl & ~TIOCM_RTS);
164 }
165 }
166
167 static void stm32_usart_rs485_rts_disable(struct uart_port *port)
168 {
169 struct stm32_port *stm32_port = to_stm32_port(port);
170 struct serial_rs485 *rs485conf = &port->rs485;
171
172 if (stm32_port->hw_flow_control ||
173 !(rs485conf->flags & SER_RS485_ENABLED))
174 return;
175
176 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
177 mctrl_gpio_set(stm32_port->gpios,
178 stm32_port->port.mctrl & ~TIOCM_RTS);
179 } else {
180 mctrl_gpio_set(stm32_port->gpios,
181 stm32_port->port.mctrl | TIOCM_RTS);
182 }
183 }
184
185 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
186 u32 delay_DDE, u32 baud)
187 {
188 u32 rs485_deat_dedt;
189 u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
190 bool over8;
191
192 *cr3 |= USART_CR3_DEM;
193 over8 = *cr1 & USART_CR1_OVER8;
194
195 *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
196
197 if (over8)
198 rs485_deat_dedt = delay_ADE * baud * 8;
199 else
200 rs485_deat_dedt = delay_ADE * baud * 16;
201
202 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
203 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
204 rs485_deat_dedt_max : rs485_deat_dedt;
205 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
206 USART_CR1_DEAT_MASK;
207 *cr1 |= rs485_deat_dedt;
208
209 if (over8)
210 rs485_deat_dedt = delay_DDE * baud * 8;
211 else
212 rs485_deat_dedt = delay_DDE * baud * 16;
213
214 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
215 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
216 rs485_deat_dedt_max : rs485_deat_dedt;
217 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
218 USART_CR1_DEDT_MASK;
219 *cr1 |= rs485_deat_dedt;
220 }
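/*
 * The DEAT/DEDT fields programmed above are expressed in oversampling ticks
 * (1/16th or 1/8th of a bit time). As an illustrative example (values not
 * taken from any particular board): a 1 ms delay_rts_before_send at 115200
 * baud with 16x oversampling gives 1 * 115200 * 16 / 1000 = 1843, which is
 * clamped to the 5-bit field maximum of 31, i.e. roughly two bit times.
 */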
221
222 static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
223 struct serial_rs485 *rs485conf)
224 {
225 struct stm32_port *stm32_port = to_stm32_port(port);
226 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
227 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
228 u32 usartdiv, baud, cr1, cr3;
229 bool over8;
230
231 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
232
233 if (rs485conf->flags & SER_RS485_ENABLED) {
234 cr1 = readl_relaxed(port->membase + ofs->cr1);
235 cr3 = readl_relaxed(port->membase + ofs->cr3);
236 usartdiv = readl_relaxed(port->membase + ofs->brr);
237 usartdiv = usartdiv & GENMASK(15, 0);
238 over8 = cr1 & USART_CR1_OVER8;
239
240 if (over8)
241 usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
242 << USART_BRR_04_R_SHIFT;
243
244 baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
245 stm32_usart_config_reg_rs485(&cr1, &cr3,
246 rs485conf->delay_rts_before_send,
247 rs485conf->delay_rts_after_send,
248 baud);
249
250 if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
251 cr3 &= ~USART_CR3_DEP;
252 else
253 cr3 |= USART_CR3_DEP;
254
255 writel_relaxed(cr3, port->membase + ofs->cr3);
256 writel_relaxed(cr1, port->membase + ofs->cr1);
257
258 if (!port->rs485_rx_during_tx_gpio)
259 rs485conf->flags |= SER_RS485_RX_DURING_TX;
260
261 } else {
262 stm32_usart_clr_bits(port, ofs->cr3,
263 USART_CR3_DEM | USART_CR3_DEP);
264 stm32_usart_clr_bits(port, ofs->cr1,
265 USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
266 }
267
268 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
269
270 /* Adjust RTS polarity in case it's driven in software */
271 if (stm32_usart_tx_empty(port))
272 stm32_usart_rs485_rts_disable(port);
273 else
274 stm32_usart_rs485_rts_enable(port);
275
276 return 0;
277 }
278
279 static int stm32_usart_init_rs485(struct uart_port *port,
280 struct platform_device *pdev)
281 {
282 struct serial_rs485 *rs485conf = &port->rs485;
283
284 rs485conf->flags = 0;
285 rs485conf->delay_rts_before_send = 0;
286 rs485conf->delay_rts_after_send = 0;
287
288 if (!pdev->dev.of_node)
289 return -ENODEV;
290
291 return uart_get_rs485_mode(port);
292 }
293
294 static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
295 {
296 return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
297 }
298
299 static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
300 {
301 dmaengine_terminate_async(stm32_port->rx_ch);
302 stm32_port->rx_dma_busy = false;
303 }
304
305 static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
306 struct dma_chan *chan,
307 enum dma_status expected_status,
308 int dmaengine_pause_or_resume(struct dma_chan *),
309 bool stm32_usart_xx_dma_started(struct stm32_port *),
310 void stm32_usart_xx_dma_terminate(struct stm32_port *))
311 {
312 struct uart_port *port = &stm32_port->port;
313 enum dma_status dma_status;
314 int ret;
315
316 if (!stm32_usart_xx_dma_started(stm32_port))
317 return -EPERM;
318
319 dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
320 if (dma_status != expected_status)
321 return -EAGAIN;
322
323 ret = dmaengine_pause_or_resume(chan);
324 if (ret) {
325 dev_err(port->dev, "DMA failed with error code: %d\n", ret);
326 stm32_usart_xx_dma_terminate(stm32_port);
327 }
328 return ret;
329 }
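/*
 * Generic pause/resume helper shared by the RX and TX wrappers below: it
 * returns -EPERM when the channel has not been started, -EAGAIN when the
 * channel is not in the expected DMA state, and terminates the transfer if
 * the dmaengine pause/resume callback itself fails.
 */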
330
331 static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
332 {
333 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
334 DMA_IN_PROGRESS, dmaengine_pause,
335 stm32_usart_rx_dma_started,
336 stm32_usart_rx_dma_terminate);
337 }
338
339 static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
340 {
341 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
342 DMA_PAUSED, dmaengine_resume,
343 stm32_usart_rx_dma_started,
344 stm32_usart_rx_dma_terminate);
345 }
346
347 /* Return true when data is pending (in pio mode), and false when no data is pending. */
348 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
349 {
350 struct stm32_port *stm32_port = to_stm32_port(port);
351 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
352
353 *sr = readl_relaxed(port->membase + ofs->isr);
354 /* Get pending characters in RDR or FIFO */
355 if (*sr & USART_SR_RXNE) {
356 /* Get all pending characters from the RDR or the FIFO when using interrupts */
357 if (!stm32_usart_rx_dma_started(stm32_port))
358 return true;
359
360 /* Handle only RX data errors when using DMA */
361 if (*sr & USART_SR_ERR_MASK)
362 return true;
363 }
364
365 return false;
366 }
367
368 static u8 stm32_usart_get_char_pio(struct uart_port *port)
369 {
370 struct stm32_port *stm32_port = to_stm32_port(port);
371 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
372 unsigned long c;
373
374 c = readl_relaxed(port->membase + ofs->rdr);
375 /* Apply RDR data mask */
376 c &= stm32_port->rdr_mask;
377
378 return c;
379 }
380
381 static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
382 {
383 struct stm32_port *stm32_port = to_stm32_port(port);
384 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
385 unsigned int size = 0;
386 u32 sr;
387 u8 c, flag;
388
389 while (stm32_usart_pending_rx_pio(port, &sr)) {
390 sr |= USART_SR_DUMMY_RX;
391 flag = TTY_NORMAL;
392
393 /*
394 * Status bits have to be cleared before reading the RDR:
395 * In FIFO mode, reading the RDR will pop the next data
396 * (if any) along with its status bits into the SR.
397 * Not doing so leads to misalignment between RDR and SR,
398 * and clears the status bits of the next RX data.
399 *
400 * Clear error flags for stm32f7 and stm32h7 compatible
401 * devices. On stm32f4 compatible devices, the error bit is
402 * cleared by the sequence [read SR - read DR].
403 */
404 if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
405 writel_relaxed(sr & USART_SR_ERR_MASK,
406 port->membase + ofs->icr);
407
408 c = stm32_usart_get_char_pio(port);
409 port->icount.rx++;
410 size++;
411 if (sr & USART_SR_ERR_MASK) {
412 if (sr & USART_SR_ORE) {
413 port->icount.overrun++;
414 } else if (sr & USART_SR_PE) {
415 port->icount.parity++;
416 } else if (sr & USART_SR_FE) {
417 /* Break detection if character is null */
418 if (!c) {
419 port->icount.brk++;
420 if (uart_handle_break(port))
421 continue;
422 } else {
423 port->icount.frame++;
424 }
425 }
426
427 sr &= port->read_status_mask;
428
429 if (sr & USART_SR_PE) {
430 flag = TTY_PARITY;
431 } else if (sr & USART_SR_FE) {
432 if (!c)
433 flag = TTY_BREAK;
434 else
435 flag = TTY_FRAME;
436 }
437 }
438
439 if (uart_prepare_sysrq_char(port, c))
440 continue;
441 uart_insert_char(port, sr, USART_SR_ORE, c, flag);
442 }
443
444 return size;
445 }
446
447 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
448 {
449 struct stm32_port *stm32_port = to_stm32_port(port);
450 struct tty_port *ttyport = &stm32_port->port.state->port;
451 unsigned char *dma_start;
452 int dma_count, i;
453
454 dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
455
456 /*
457 * Apply rdr_mask on the buffer in order to mask the parity bit.
458 * This loop is useless in cs8 mode because DMA copies only
459 * 8 bits and already ignores the parity bit.
460 */
461 if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
462 for (i = 0; i < dma_size; i++)
463 *(dma_start + i) &= stm32_port->rdr_mask;
464
465 dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
466 port->icount.rx += dma_count;
467 if (dma_count != dma_size)
468 port->icount.buf_overrun++;
469 stm32_port->last_res -= dma_count;
470 if (stm32_port->last_res == 0)
471 stm32_port->last_res = RX_BUF_L;
472 }
473
474 static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
475 {
476 struct stm32_port *stm32_port = to_stm32_port(port);
477 unsigned int dma_size, size = 0;
478
479 /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
480 if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
481 /* Conditional first part: from last_res to end of DMA buffer */
482 dma_size = stm32_port->last_res;
483 stm32_usart_push_buffer_dma(port, dma_size);
484 size = dma_size;
485 }
486
487 dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
488 stm32_usart_push_buffer_dma(port, dma_size);
489 size += dma_size;
490
491 return size;
492 }
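/*
 * The RX DMA buffer is cyclic: "last_res" tracks how many bytes remain until
 * the end of the buffer, while the residue reported by the dmaengine tells
 * how far the hardware write pointer is from that same end. When the residue
 * is larger than last_res the write pointer has wrapped, so the tail of the
 * buffer is pushed to the tty layer first, then the remaining head part.
 */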
493
494 static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
495 {
496 struct stm32_port *stm32_port = to_stm32_port(port);
497 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
498 enum dma_status rx_dma_status;
499 u32 sr;
500 unsigned int size = 0;
501
502 if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
503 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
504 stm32_port->rx_ch->cookie,
505 &stm32_port->rx_dma_state);
506 if (rx_dma_status == DMA_IN_PROGRESS ||
507 rx_dma_status == DMA_PAUSED) {
508 /* Empty DMA buffer */
509 size = stm32_usart_receive_chars_dma(port);
510 sr = readl_relaxed(port->membase + ofs->isr);
511 if (sr & USART_SR_ERR_MASK) {
512 /* Disable DMA request line */
513 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
514
515 /* Switch to PIO mode to handle the errors */
516 size += stm32_usart_receive_chars_pio(port);
517
518 /* Switch back to DMA mode */
519 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
520 }
521 } else {
522 /* Disable RX DMA */
523 stm32_usart_rx_dma_terminate(stm32_port);
524 /* Fall back to interrupt mode */
525 dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
526 size = stm32_usart_receive_chars_pio(port);
527 }
528 } else {
529 size = stm32_usart_receive_chars_pio(port);
530 }
531
532 return size;
533 }
534
535 static void stm32_usart_rx_dma_complete(void *arg)
536 {
537 struct uart_port *port = arg;
538 struct tty_port *tport = &port->state->port;
539 unsigned int size;
540 unsigned long flags;
541
542 uart_port_lock_irqsave(port, &flags);
543 size = stm32_usart_receive_chars(port, false);
544 uart_unlock_and_check_sysrq_irqrestore(port, flags);
545 if (size)
546 tty_flip_buffer_push(tport);
547 }
548
549 static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
550 {
551 struct stm32_port *stm32_port = to_stm32_port(port);
552 struct dma_async_tx_descriptor *desc;
553 enum dma_status rx_dma_status;
554 int ret;
555
556 if (stm32_port->throttled)
557 return 0;
558
559 if (stm32_port->rx_dma_busy) {
560 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
561 stm32_port->rx_ch->cookie,
562 NULL);
563 if (rx_dma_status == DMA_IN_PROGRESS)
564 return 0;
565
566 if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
567 return 0;
568
569 dev_err(port->dev, "DMA failed : status error.\n");
570 stm32_usart_rx_dma_terminate(stm32_port);
571 }
572
573 stm32_port->rx_dma_busy = true;
574
575 stm32_port->last_res = RX_BUF_L;
576 /* Prepare a DMA cyclic transaction */
577 desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
578 stm32_port->rx_dma_buf,
579 RX_BUF_L, RX_BUF_P,
580 DMA_DEV_TO_MEM,
581 DMA_PREP_INTERRUPT);
582 if (!desc) {
583 dev_err(port->dev, "rx dma prep cyclic failed\n");
584 stm32_port->rx_dma_busy = false;
585 return -ENODEV;
586 }
587
588 desc->callback = stm32_usart_rx_dma_complete;
589 desc->callback_param = port;
590
591 /* Push current DMA transaction in the pending queue */
592 ret = dma_submit_error(dmaengine_submit(desc));
593 if (ret) {
594 dmaengine_terminate_sync(stm32_port->rx_ch);
595 stm32_port->rx_dma_busy = false;
596 return ret;
597 }
598
599 /* Issue pending DMA requests */
600 dma_async_issue_pending(stm32_port->rx_ch);
601
602 return 0;
603 }
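/*
 * stm32_usart_rx_dma_start_or_resume() is called both at startup and when
 * unthrottling: a channel already running is left alone, a paused channel is
 * resumed, and anything else is terminated before a fresh cyclic descriptor
 * covering the whole RX buffer (period RX_BUF_P) is submitted with
 * stm32_usart_rx_dma_complete() as completion callback.
 */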
604
605 static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
606 {
607 dmaengine_terminate_async(stm32_port->tx_ch);
608 stm32_port->tx_dma_busy = false;
609 }
610
611 static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
612 {
613 /*
614 * We cannot use the function "dmaengine_tx_status" to know the
615 * status of DMA. This function does not show if the "dma complete"
616 * callback of the DMA transaction has been called. So we prefer
617 * to use the "tx_dma_busy" flag to prevent two DMA transactions
618 * from running at the same time.
619 */
620 return stm32_port->tx_dma_busy;
621 }
622
623 static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
624 {
625 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
626 DMA_IN_PROGRESS, dmaengine_pause,
627 stm32_usart_tx_dma_started,
628 stm32_usart_tx_dma_terminate);
629 }
630
631 static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
632 {
633 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
634 DMA_PAUSED, dmaengine_resume,
635 stm32_usart_tx_dma_started,
636 stm32_usart_tx_dma_terminate);
637 }
638
639 static void stm32_usart_tx_dma_complete(void *arg)
640 {
641 struct uart_port *port = arg;
642 struct stm32_port *stm32port = to_stm32_port(port);
643 unsigned long flags;
644
645 stm32_usart_tx_dma_terminate(stm32port);
646
647 /* Let's see if we have pending data to send */
648 uart_port_lock_irqsave(port, &flags);
649 stm32_usart_transmit_chars(port);
650 uart_port_unlock_irqrestore(port, flags);
651 }
652
653 static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
654 {
655 struct stm32_port *stm32_port = to_stm32_port(port);
656 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
657
658 /*
659 * Enable the TX FIFO threshold irq when the FIFO is enabled,
660 * or the TX empty irq when the FIFO is disabled.
661 */
662 if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
663 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
664 else
665 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
666 }
667
668 static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
669 {
670 struct stm32_port *stm32_port = to_stm32_port(port);
671 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
672
673 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
674 }
675
676 static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
677 {
678 struct stm32_port *stm32_port = to_stm32_port(port);
679 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
680
681 if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
682 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
683 else
684 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
685 }
686
687 static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
688 {
689 struct stm32_port *stm32_port = to_stm32_port(port);
690 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
691
692 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
693 }
694
695 static void stm32_usart_transmit_chars_pio(struct uart_port *port)
696 {
697 struct stm32_port *stm32_port = to_stm32_port(port);
698 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
699 struct tty_port *tport = &port->state->port;
700
701 while (1) {
702 unsigned char ch;
703
704 /* Check that TDR is empty before filling FIFO */
705 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
706 break;
707
708 if (!uart_fifo_get(port, &ch))
709 break;
710
711 writel_relaxed(ch, port->membase + ofs->tdr);
712 }
713
714 /* rely on TXE irq (mask or unmask) for sending remaining data */
715 if (kfifo_is_empty(&tport->xmit_fifo))
716 stm32_usart_tx_interrupt_disable(port);
717 else
718 stm32_usart_tx_interrupt_enable(port);
719 }
720
721 static void stm32_usart_transmit_chars_dma(struct uart_port *port)
722 {
723 struct stm32_port *stm32port = to_stm32_port(port);
724 struct tty_port *tport = &port->state->port;
725 struct dma_async_tx_descriptor *desc = NULL;
726 unsigned int count;
727 int ret;
728
729 if (stm32_usart_tx_dma_started(stm32port)) {
730 ret = stm32_usart_tx_dma_resume(stm32port);
731 if (ret < 0 && ret != -EAGAIN)
732 goto fallback_err;
733 return;
734 }
735
736 count = kfifo_out_peek(&tport->xmit_fifo, &stm32port->tx_buf[0],
737 TX_BUF_L);
738
739 desc = dmaengine_prep_slave_single(stm32port->tx_ch,
740 stm32port->tx_dma_buf,
741 count,
742 DMA_MEM_TO_DEV,
743 DMA_PREP_INTERRUPT);
744
745 if (!desc)
746 goto fallback_err;
747
748 /*
749 * Set the "tx_dma_busy" flag. It is cleared when
750 * dmaengine_terminate_async() is called, and prevents
751 * transmit_chars_dma() from starting another DMA transaction
752 * before the callback of the previous one has been called.
753 */
754 stm32port->tx_dma_busy = true;
755
756 desc->callback = stm32_usart_tx_dma_complete;
757 desc->callback_param = port;
758
759 /* Push current DMA TX transaction in the pending queue */
760 /* DMA not yet started, so it is safe to free resources */
761 ret = dma_submit_error(dmaengine_submit(desc));
762 if (ret) {
763 dev_err(port->dev, "DMA failed with error code: %d\n", ret);
764 stm32_usart_tx_dma_terminate(stm32port);
765 goto fallback_err;
766 }
767
768 /* Issue pending DMA TX requests */
769 dma_async_issue_pending(stm32port->tx_ch);
770
771 uart_xmit_advance(port, count);
772
773 return;
774
775 fallback_err:
776 stm32_usart_transmit_chars_pio(port);
777 }
778
779 static void stm32_usart_transmit_chars(struct uart_port *port)
780 {
781 struct stm32_port *stm32_port = to_stm32_port(port);
782 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
783 struct tty_port *tport = &port->state->port;
784 u32 isr;
785 int ret;
786
787 if (!stm32_port->hw_flow_control &&
788 port->rs485.flags & SER_RS485_ENABLED &&
789 (port->x_char ||
790 !(kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)))) {
791 stm32_usart_tc_interrupt_disable(port);
792 stm32_usart_rs485_rts_enable(port);
793 }
794
795 if (port->x_char) {
796 /* dma terminate may have been called in case of dma pause failure */
797 stm32_usart_tx_dma_pause(stm32_port);
798
799 /* Check that TDR is empty before filling FIFO */
800 ret =
801 readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
802 isr,
803 (isr & USART_SR_TXE),
804 10, 1000);
805 if (ret)
806 dev_warn(port->dev, "1 character may be erased\n");
807
808 writel_relaxed(port->x_char, port->membase + ofs->tdr);
809 port->x_char = 0;
810 port->icount.tx++;
811
812 /* dma terminate may have been called in case of dma resume failure */
813 stm32_usart_tx_dma_resume(stm32_port);
814 return;
815 }
816
817 if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) {
818 stm32_usart_tx_interrupt_disable(port);
819 return;
820 }
821
822 if (ofs->icr == UNDEF_REG)
823 stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
824 else
825 writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
826
827 if (stm32_port->tx_ch)
828 stm32_usart_transmit_chars_dma(port);
829 else
830 stm32_usart_transmit_chars_pio(port);
831
832 if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
833 uart_write_wakeup(port);
834
835 if (kfifo_is_empty(&tport->xmit_fifo)) {
836 stm32_usart_tx_interrupt_disable(port);
837 if (!stm32_port->hw_flow_control &&
838 port->rs485.flags & SER_RS485_ENABLED) {
839 stm32_usart_tc_interrupt_enable(port);
840 }
841 }
842 }
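/*
 * Summary of the TX path above: a pending x_char is always sent first in PIO
 * (pausing TX DMA around it), then data goes through DMA when a TX channel is
 * available and through PIO otherwise. In software-driven RS-485 mode, RTS is
 * asserted before transmission and the TC interrupt is armed once the FIFO
 * drains so that RTS can be released when the last frame has left the wire.
 */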
843
844 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
845 {
846 struct uart_port *port = ptr;
847 struct tty_port *tport = &port->state->port;
848 struct stm32_port *stm32_port = to_stm32_port(port);
849 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
850 u32 sr;
851 unsigned int size;
852 irqreturn_t ret = IRQ_NONE;
853
854 sr = readl_relaxed(port->membase + ofs->isr);
855
856 if (!stm32_port->hw_flow_control &&
857 port->rs485.flags & SER_RS485_ENABLED &&
858 (sr & USART_SR_TC)) {
859 stm32_usart_tc_interrupt_disable(port);
860 stm32_usart_rs485_rts_disable(port);
861 ret = IRQ_HANDLED;
862 }
863
864 if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
865 writel_relaxed(USART_ICR_RTOCF,
866 port->membase + ofs->icr);
867 ret = IRQ_HANDLED;
868 }
869
870 if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
871 /* Clear wake up flag and disable wake up interrupt */
872 writel_relaxed(USART_ICR_WUCF,
873 port->membase + ofs->icr);
874 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
875 if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
876 pm_wakeup_event(tport->tty->dev, 0);
877 ret = IRQ_HANDLED;
878 }
879
880 /*
881 * RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA request
882 * line has been masked by HW and RX data are stacking up in the FIFO.
883 */
884 if (!stm32_port->throttled) {
885 if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
886 ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
887 uart_port_lock(port);
888 size = stm32_usart_receive_chars(port, false);
889 uart_unlock_and_check_sysrq(port);
890 if (size)
891 tty_flip_buffer_push(tport);
892 ret = IRQ_HANDLED;
893 }
894 }
895
896 if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
897 uart_port_lock(port);
898 stm32_usart_transmit_chars(port);
899 uart_port_unlock(port);
900 ret = IRQ_HANDLED;
901 }
902
903 /* Receiver timeout irq for DMA RX */
904 if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
905 uart_port_lock(port);
906 size = stm32_usart_receive_chars(port, false);
907 uart_unlock_and_check_sysrq(port);
908 if (size)
909 tty_flip_buffer_push(tport);
910 ret = IRQ_HANDLED;
911 }
912
913 return ret;
914 }
915
916 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
917 {
918 struct stm32_port *stm32_port = to_stm32_port(port);
919 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
920
921 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
922 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
923 else
924 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
925
926 mctrl_gpio_set(stm32_port->gpios, mctrl);
927 }
928
929 static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
930 {
931 struct stm32_port *stm32_port = to_stm32_port(port);
932 unsigned int ret;
933
934 /* This routine is used to get signals of: DCD, DSR, RI, and CTS */
935 ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
936
937 return mctrl_gpio_get(stm32_port->gpios, &ret);
938 }
939
940 static void stm32_usart_enable_ms(struct uart_port *port)
941 {
942 mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
943 }
944
945 static void stm32_usart_disable_ms(struct uart_port *port)
946 {
947 mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
948 }
949
950 /* Transmit stop */
951 static void stm32_usart_stop_tx(struct uart_port *port)
952 {
953 struct stm32_port *stm32_port = to_stm32_port(port);
954
955 stm32_usart_tx_interrupt_disable(port);
956
957 /* dma terminate may have been called in case of dma pause failure */
958 stm32_usart_tx_dma_pause(stm32_port);
959
960 stm32_usart_rs485_rts_disable(port);
961 }
962
963 /* There are probably characters waiting to be transmitted. */
964 static void stm32_usart_start_tx(struct uart_port *port)
965 {
966 struct tty_port *tport = &port->state->port;
967
968 if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char)
969 return;
970
971 stm32_usart_rs485_rts_enable(port);
972
973 stm32_usart_transmit_chars(port);
974 }
975
976 /* Flush the transmit buffer. */
977 static void stm32_usart_flush_buffer(struct uart_port *port)
978 {
979 struct stm32_port *stm32_port = to_stm32_port(port);
980
981 if (stm32_port->tx_ch)
982 stm32_usart_tx_dma_terminate(stm32_port);
983 }
984
985 /* Throttle the remote when input buffer is about to overflow. */
986 static void stm32_usart_throttle(struct uart_port *port)
987 {
988 struct stm32_port *stm32_port = to_stm32_port(port);
989 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
990 unsigned long flags;
991
992 uart_port_lock_irqsave(port, &flags);
993
994 /*
995 * Pause DMA transfer, so the RX data gets queued into the FIFO.
996 * Hardware flow control is triggered when RX FIFO is full.
997 */
998 stm32_usart_rx_dma_pause(stm32_port);
999
1000 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1001 if (stm32_port->cr3_irq)
1002 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1003
1004 stm32_port->throttled = true;
1005 uart_port_unlock_irqrestore(port, flags);
1006 }
1007
1008 /* Unthrottle the remote, the input buffer can now accept data. */
1009 static void stm32_usart_unthrottle(struct uart_port *port)
1010 {
1011 struct stm32_port *stm32_port = to_stm32_port(port);
1012 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1013 unsigned long flags;
1014
1015 uart_port_lock_irqsave(port, &flags);
1016 stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
1017 if (stm32_port->cr3_irq)
1018 stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
1019
1020 stm32_port->throttled = false;
1021
1022 /*
1023 * Switch back to DMA mode (resume DMA).
1024 * Hardware flow control is stopped when FIFO is not full any more.
1025 */
1026 if (stm32_port->rx_ch)
1027 stm32_usart_rx_dma_start_or_resume(port);
1028
1029 uart_port_unlock_irqrestore(port, flags);
1030 }
1031
1032 /* Receive stop */
1033 static void stm32_usart_stop_rx(struct uart_port *port)
1034 {
1035 struct stm32_port *stm32_port = to_stm32_port(port);
1036 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1037
1038 /* Disable DMA request line. */
1039 stm32_usart_rx_dma_pause(stm32_port);
1040
1041 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1042 if (stm32_port->cr3_irq)
1043 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1044 }
1045
1046 static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
1047 {
1048 struct stm32_port *stm32_port = to_stm32_port(port);
1049 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1050 unsigned long flags;
1051
1052 uart_port_lock_irqsave(port, &flags);
1053
1054 if (break_state)
1055 stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ);
1056 else
1057 stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ);
1058
1059 uart_port_unlock_irqrestore(port, flags);
1060 }
1061
1062 static int stm32_usart_startup(struct uart_port *port)
1063 {
1064 struct stm32_port *stm32_port = to_stm32_port(port);
1065 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1066 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1067 const char *name = to_platform_device(port->dev)->name;
1068 u32 val;
1069 int ret;
1070
1071 ret = request_irq(port->irq, stm32_usart_interrupt,
1072 IRQF_NO_SUSPEND, name, port);
1073 if (ret)
1074 return ret;
1075
1076 if (stm32_port->swap) {
1077 val = readl_relaxed(port->membase + ofs->cr2);
1078 val |= USART_CR2_SWAP;
1079 writel_relaxed(val, port->membase + ofs->cr2);
1080 }
1081 stm32_port->throttled = false;
1082
1083 /* RX FIFO Flush */
1084 if (ofs->rqr != UNDEF_REG)
1085 writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
1086
1087 if (stm32_port->rx_ch) {
1088 ret = stm32_usart_rx_dma_start_or_resume(port);
1089 if (ret) {
1090 free_irq(port->irq, port);
1091 return ret;
1092 }
1093 }
1094
1095 /* RX enabling */
1096 val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
1097 stm32_usart_set_bits(port, ofs->cr1, val);
1098
1099 return 0;
1100 }
1101
1102 static void stm32_usart_shutdown(struct uart_port *port)
1103 {
1104 struct stm32_port *stm32_port = to_stm32_port(port);
1105 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1106 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1107 u32 val, isr;
1108 int ret;
1109
1110 if (stm32_usart_tx_dma_started(stm32_port))
1111 stm32_usart_tx_dma_terminate(stm32_port);
1112
1113 if (stm32_port->tx_ch)
1114 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
1115
1116 /* Disable modem control interrupts */
1117 stm32_usart_disable_ms(port);
1118
1119 val = USART_CR1_TXEIE | USART_CR1_TE;
1120 val |= stm32_port->cr1_irq | USART_CR1_RE;
1121 val |= BIT(cfg->uart_enable_bit);
1122 if (stm32_port->fifoen)
1123 val |= USART_CR1_FIFOEN;
1124
1125 ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
1126 isr, (isr & USART_SR_TC),
1127 10, 100000);
1128
1129 /* Send the TC error message only when ISR_TC is not set */
1130 if (ret)
1131 dev_err(port->dev, "Transmission is not complete\n");
1132
1133 /* Disable RX DMA. */
1134 if (stm32_port->rx_ch) {
1135 stm32_usart_rx_dma_terminate(stm32_port);
1136 dmaengine_synchronize(stm32_port->rx_ch);
1137 }
1138
1139 /* flush RX & TX FIFO */
1140 if (ofs->rqr != UNDEF_REG)
1141 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1142 port->membase + ofs->rqr);
1143
1144 stm32_usart_clr_bits(port, ofs->cr1, val);
1145
1146 free_irq(port->irq, port);
1147 }
1148
1149 static const unsigned int stm32_usart_presc_val[] = {1, 2, 4, 6, 8, 10, 12, 16, 32, 64, 128, 256};
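/*
 * stm32_usart_presc_val[] maps the PRESC register values 0..11 to the input
 * clock division factors used below when searching for a reachable baud rate;
 * e.g. a PRESC value of 3 divides the input clock by 6 before the BRR divider
 * is computed.
 */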
1150
1151 static void stm32_usart_set_termios(struct uart_port *port,
1152 struct ktermios *termios,
1153 const struct ktermios *old)
1154 {
1155 struct stm32_port *stm32_port = to_stm32_port(port);
1156 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1157 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1158 struct serial_rs485 *rs485conf = &port->rs485;
1159 unsigned int baud, bits, uart_clk, uart_clk_pres;
1160 u32 usartdiv, mantissa, fraction, oversampling;
1161 tcflag_t cflag = termios->c_cflag;
1162 u32 cr1, cr2, cr3, isr, brr, presc;
1163 unsigned long flags;
1164 int ret;
1165
1166 if (!stm32_port->hw_flow_control)
1167 cflag &= ~CRTSCTS;
1168
1169 uart_clk = clk_get_rate(stm32_port->clk);
1170
1171 baud = uart_get_baud_rate(port, termios, old, 0, uart_clk / 8);
1172
1173 uart_port_lock_irqsave(port, &flags);
1174
1175 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
1176 isr,
1177 (isr & USART_SR_TC),
1178 10, 100000);
1179
1180 /* Send the TC error message only when ISR_TC is not set. */
1181 if (ret)
1182 dev_err(port->dev, "Transmission is not complete\n");
1183
1184 /* Stop serial port and reset value */
1185 writel_relaxed(0, port->membase + ofs->cr1);
1186
1187 /* flush RX & TX FIFO */
1188 if (ofs->rqr != UNDEF_REG)
1189 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1190 port->membase + ofs->rqr);
1191
1192 cr1 = USART_CR1_TE | USART_CR1_RE;
1193 if (stm32_port->fifoen)
1194 cr1 |= USART_CR1_FIFOEN;
1195 cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;
1196
1197 /* Tx and RX FIFO configuration */
1198 cr3 = readl_relaxed(port->membase + ofs->cr3);
1199 cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
1200 if (stm32_port->fifoen) {
1201 if (stm32_port->txftcfg >= 0)
1202 cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
1203 if (stm32_port->rxftcfg >= 0)
1204 cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
1205 }
1206
1207 if (cflag & CSTOPB)
1208 cr2 |= USART_CR2_STOP_2B;
1209
1210 bits = tty_get_char_size(cflag);
1211 stm32_port->rdr_mask = (BIT(bits) - 1);
1212
1213 if (cflag & PARENB) {
1214 bits++;
1215 cr1 |= USART_CR1_PCE;
1216 }
1217
1218 /*
1219 * Word length configuration:
1220 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
1221 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
1222 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
1223 * M0 and M1 already cleared by cr1 initialization.
1224 */
1225 if (bits == 9) {
1226 cr1 |= USART_CR1_M0;
1227 } else if ((bits == 7) && cfg->has_7bits_data) {
1228 cr1 |= USART_CR1_M1;
1229 } else if (bits != 8) {
1230 dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
1231 , bits);
1232 cflag &= ~CSIZE;
1233 cflag |= CS8;
1234 termios->c_cflag = cflag;
1235 bits = 8;
1236 if (cflag & PARENB) {
1237 bits++;
1238 cr1 |= USART_CR1_M0;
1239 }
1240 }
1241
1242 if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
1243 (stm32_port->fifoen &&
1244 stm32_port->rxftcfg >= 0))) {
1245 if (cflag & CSTOPB)
1246 bits = bits + 3; /* 1 start bit + 2 stop bits */
1247 else
1248 bits = bits + 2; /* 1 start bit + 1 stop bit */
1249
1250 /* RX timeout irq to occur after last stop bit + bits */
1251 stm32_port->cr1_irq = USART_CR1_RTOIE;
1252 writel_relaxed(bits, port->membase + ofs->rtor);
1253 cr2 |= USART_CR2_RTOEN;
1254 /*
1255 * Enable the FIFO threshold irq in two cases: either when there is no DMA, or when
1256 * waking up over USART from low power, until the DMA gets re-enabled by resume.
1257 */
1258 stm32_port->cr3_irq = USART_CR3_RXFTIE;
1259 }
1260
1261 cr1 |= stm32_port->cr1_irq;
1262 cr3 |= stm32_port->cr3_irq;
1263
1264 if (cflag & PARODD)
1265 cr1 |= USART_CR1_PS;
1266
1267 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1268 if (cflag & CRTSCTS) {
1269 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
1270 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
1271 }
1272
1273 for (presc = 0; presc <= USART_PRESC_MAX; presc++) {
1274 uart_clk_pres = DIV_ROUND_CLOSEST(uart_clk, stm32_usart_presc_val[presc]);
1275 usartdiv = DIV_ROUND_CLOSEST(uart_clk_pres, baud);
1276
1277 /*
1278 * The USART supports 16 or 8 times oversampling.
1279 * By default we prefer 16 times oversampling, so that the receiver
1280 * has a better tolerance to clock deviations.
1281 * 8 times oversampling is only used to achieve higher speeds.
1282 */
1283 if (usartdiv < 16) {
1284 oversampling = 8;
1285 cr1 |= USART_CR1_OVER8;
1286 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
1287 } else {
1288 oversampling = 16;
1289 cr1 &= ~USART_CR1_OVER8;
1290 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
1291 }
1292
1293 mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
1294 fraction = usartdiv % oversampling;
1295 brr = mantissa | fraction;
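/*
 * Worked example (illustrative values only): with a 64 MHz input clock,
 * 115200 baud and no prescaler, usartdiv = 556 at 16x oversampling, so
 * mantissa = 34 and fraction = 12, giving BRR = (34 << 4) | 12 = 0x22c.
 */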
1296
1297 if (FIELD_FIT(USART_BRR_MASK, brr)) {
1298 if (ofs->presc != UNDEF_REG) {
1299 port->uartclk = uart_clk_pres;
1300 writel_relaxed(presc, port->membase + ofs->presc);
1301 } else if (presc) {
1302 /* We need a prescaler but we don't have it (STM32F4, STM32F7) */
1303 dev_err(port->dev,
1304 "unable to set baudrate, input clock is too high");
1305 }
1306 break;
1307 } else if (presc == USART_PRESC_MAX) {
1308 /* Even with prescaler and brr at max value we can't set baudrate */
1309 dev_err(port->dev, "unable to set baudrate, input clock is too high");
1310 break;
1311 }
1312 }
1313
1314 writel_relaxed(brr, port->membase + ofs->brr);
1315
1316 uart_update_timeout(port, cflag, baud);
1317
1318 port->read_status_mask = USART_SR_ORE;
1319 if (termios->c_iflag & INPCK)
1320 port->read_status_mask |= USART_SR_PE | USART_SR_FE;
1321 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1322 port->read_status_mask |= USART_SR_FE;
1323
1324 /* Characters to ignore */
1325 port->ignore_status_mask = 0;
1326 if (termios->c_iflag & IGNPAR)
1327 port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
1328 if (termios->c_iflag & IGNBRK) {
1329 port->ignore_status_mask |= USART_SR_FE;
1330 /*
1331 * If we're ignoring parity and break indicators,
1332 * ignore overruns too (for real raw support).
1333 */
1334 if (termios->c_iflag & IGNPAR)
1335 port->ignore_status_mask |= USART_SR_ORE;
1336 }
1337
1338 /* Ignore all characters if CREAD is not set */
1339 if ((termios->c_cflag & CREAD) == 0)
1340 port->ignore_status_mask |= USART_SR_DUMMY_RX;
1341
1342 if (stm32_port->rx_ch) {
1343 /*
1344 * Setup DMA to collect only valid data and enable error irqs.
1345 * This also enables break reception when using DMA.
1346 */
1347 cr1 |= USART_CR1_PEIE;
1348 cr3 |= USART_CR3_EIE;
1349 cr3 |= USART_CR3_DMAR;
1350 cr3 |= USART_CR3_DDRE;
1351 }
1352
1353 if (stm32_port->tx_ch)
1354 cr3 |= USART_CR3_DMAT;
1355
1356 if (rs485conf->flags & SER_RS485_ENABLED) {
1357 stm32_usart_config_reg_rs485(&cr1, &cr3,
1358 rs485conf->delay_rts_before_send,
1359 rs485conf->delay_rts_after_send,
1360 baud);
1361 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
1362 cr3 &= ~USART_CR3_DEP;
1363 rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
1364 } else {
1365 cr3 |= USART_CR3_DEP;
1366 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
1367 }
1368
1369 } else {
1370 cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
1371 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
1372 }
1373
1374 /* Configure wake up from low power on start bit detection */
1375 if (stm32_port->wakeup_src) {
1376 cr3 &= ~USART_CR3_WUS_MASK;
1377 cr3 |= USART_CR3_WUS_START_BIT;
1378 }
1379
1380 writel_relaxed(cr3, port->membase + ofs->cr3);
1381 writel_relaxed(cr2, port->membase + ofs->cr2);
1382 writel_relaxed(cr1, port->membase + ofs->cr1);
1383
1384 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1385 uart_port_unlock_irqrestore(port, flags);
1386
1387 /* Handle modem control interrupts */
1388 if (UART_ENABLE_MS(port, termios->c_cflag))
1389 stm32_usart_enable_ms(port);
1390 else
1391 stm32_usart_disable_ms(port);
1392 }
1393
1394 static const char *stm32_usart_type(struct uart_port *port)
1395 {
1396 return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
1397 }
1398
1399 static void stm32_usart_release_port(struct uart_port *port)
1400 {
1401 }
1402
1403 static int stm32_usart_request_port(struct uart_port *port)
1404 {
1405 return 0;
1406 }
1407
1408 static void stm32_usart_config_port(struct uart_port *port, int flags)
1409 {
1410 if (flags & UART_CONFIG_TYPE)
1411 port->type = PORT_STM32;
1412 }
1413
1414 static int
1415 stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
1416 {
1417 /* No user changeable parameters */
1418 return -EINVAL;
1419 }
1420
1421 static void stm32_usart_pm(struct uart_port *port, unsigned int state,
1422 unsigned int oldstate)
1423 {
1424 struct stm32_port *stm32port = container_of(port,
1425 struct stm32_port, port);
1426 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1427 const struct stm32_usart_config *cfg = &stm32port->info->cfg;
1428 unsigned long flags;
1429
1430 switch (state) {
1431 case UART_PM_STATE_ON:
1432 pm_runtime_get_sync(port->dev);
1433 break;
1434 case UART_PM_STATE_OFF:
1435 uart_port_lock_irqsave(port, &flags);
1436 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1437 uart_port_unlock_irqrestore(port, flags);
1438 pm_runtime_put_sync(port->dev);
1439 break;
1440 }
1441 }
1442
1443 #if defined(CONFIG_CONSOLE_POLL)
1444
1445 /* Callbacks for character polling in debug context (i.e. KGDB). */
1446 static int stm32_usart_poll_init(struct uart_port *port)
1447 {
1448 struct stm32_port *stm32_port = to_stm32_port(port);
1449
1450 return clk_prepare_enable(stm32_port->clk);
1451 }
1452
1453 static int stm32_usart_poll_get_char(struct uart_port *port)
1454 {
1455 struct stm32_port *stm32_port = to_stm32_port(port);
1456 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1457
1458 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
1459 return NO_POLL_CHAR;
1460
1461 return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
1462 }
1463
1464 static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
1465 {
1466 stm32_usart_console_putchar(port, ch);
1467 }
1468 #endif /* CONFIG_CONSOLE_POLL */
1469
1470 static const struct uart_ops stm32_uart_ops = {
1471 .tx_empty = stm32_usart_tx_empty,
1472 .set_mctrl = stm32_usart_set_mctrl,
1473 .get_mctrl = stm32_usart_get_mctrl,
1474 .stop_tx = stm32_usart_stop_tx,
1475 .start_tx = stm32_usart_start_tx,
1476 .throttle = stm32_usart_throttle,
1477 .unthrottle = stm32_usart_unthrottle,
1478 .stop_rx = stm32_usart_stop_rx,
1479 .enable_ms = stm32_usart_enable_ms,
1480 .break_ctl = stm32_usart_break_ctl,
1481 .startup = stm32_usart_startup,
1482 .shutdown = stm32_usart_shutdown,
1483 .flush_buffer = stm32_usart_flush_buffer,
1484 .set_termios = stm32_usart_set_termios,
1485 .pm = stm32_usart_pm,
1486 .type = stm32_usart_type,
1487 .release_port = stm32_usart_release_port,
1488 .request_port = stm32_usart_request_port,
1489 .config_port = stm32_usart_config_port,
1490 .verify_port = stm32_usart_verify_port,
1491 #if defined(CONFIG_CONSOLE_POLL)
1492 .poll_init = stm32_usart_poll_init,
1493 .poll_get_char = stm32_usart_poll_get_char,
1494 .poll_put_char = stm32_usart_poll_put_char,
1495 #endif /* CONFIG_CONSOLE_POLL */
1496 };
1497
1498 struct stm32_usart_thresh_ratio {
1499 int mul;
1500 int div;
1501 };
1502
1503 static const struct stm32_usart_thresh_ratio stm32h7_usart_fifo_thresh_cfg[] = {
1504 {1, 8}, {1, 4}, {1, 2}, {3, 4}, {7, 8}, {1, 1} };
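/*
 * Each entry above expresses a FIFO threshold as a fraction of the FIFO
 * depth (1/8, 1/4, 1/2, 3/4, 7/8, full); the index of the selected entry is
 * what ends up being programmed into the TXFTCFG/RXFTCFG fields of CR3 by
 * stm32_usart_get_ftcfg() below.
 */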
1505
1506 static int stm32_usart_get_thresh_value(u32 fifo_size, int index)
1507 {
1508 return fifo_size * stm32h7_usart_fifo_thresh_cfg[index].mul /
1509 stm32h7_usart_fifo_thresh_cfg[index].div;
1510 }
1511
1512 static int stm32_usart_get_ftcfg(struct platform_device *pdev, struct stm32_port *stm32port,
1513 const char *p, int *ftcfg)
1514 {
1515 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1516 u32 bytes, i, cfg8;
1517 int fifo_size;
1518
1519 if (WARN_ON(ofs->hwcfgr1 == UNDEF_REG))
1520 return 1;
1521
1522 cfg8 = FIELD_GET(USART_HWCFGR1_CFG8,
1523 readl_relaxed(stm32port->port.membase + ofs->hwcfgr1));
1524
1525 /* On STM32H7, hwcfgr is not present, so the returned value will be 0 */
1526 fifo_size = cfg8 ? 1 << cfg8 : STM32H7_USART_FIFO_SIZE;
1527
1528 /* DT option to get RX & TX FIFO threshold (default to half fifo size) */
1529 if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
1530 bytes = fifo_size / 2;
1531
1532 if (bytes < stm32_usart_get_thresh_value(fifo_size, 0)) {
1533 *ftcfg = -EINVAL;
1534 return fifo_size;
1535 }
1536
1537 for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) {
1538 if (stm32_usart_get_thresh_value(fifo_size, i) >= bytes)
1539 break;
1540 }
1541 if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
1542 i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
1543
1544 dev_dbg(&pdev->dev, "%s set to %d/%d bytes\n", p,
1545 stm32_usart_get_thresh_value(fifo_size, i), fifo_size);
1546
1547 *ftcfg = i;
1548 return fifo_size;
1549 }
1550
1551 static void stm32_usart_deinit_port(struct stm32_port *stm32port)
1552 {
1553 clk_disable_unprepare(stm32port->clk);
1554 }
1555
1556 static const struct serial_rs485 stm32_rs485_supported = {
1557 .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
1558 SER_RS485_RX_DURING_TX,
1559 .delay_rts_before_send = 1,
1560 .delay_rts_after_send = 1,
1561 };
1562
1563 static int stm32_usart_init_port(struct stm32_port *stm32port,
1564 struct platform_device *pdev)
1565 {
1566 struct uart_port *port = &stm32port->port;
1567 struct resource *res;
1568 int ret, irq;
1569
1570 irq = platform_get_irq(pdev, 0);
1571 if (irq < 0)
1572 return irq;
1573
1574 port->iotype = UPIO_MEM;
1575 port->flags = UPF_BOOT_AUTOCONF;
1576 port->ops = &stm32_uart_ops;
1577 port->dev = &pdev->dev;
1578 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
1579 port->irq = irq;
1580 port->rs485_config = stm32_usart_config_rs485;
1581 port->rs485_supported = stm32_rs485_supported;
1582
1583 ret = stm32_usart_init_rs485(port, pdev);
1584 if (ret)
1585 return ret;
1586
1587 stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
1588 of_property_read_bool(pdev->dev.of_node, "wakeup-source");
1589
1590 stm32port->swap = stm32port->info->cfg.has_swap &&
1591 of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");
1592
1593 port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1594 if (IS_ERR(port->membase))
1595 return PTR_ERR(port->membase);
1596 port->mapbase = res->start;
1597
1598 spin_lock_init(&port->lock);
1599
1600 stm32port->clk = devm_clk_get(&pdev->dev, NULL);
1601 if (IS_ERR(stm32port->clk))
1602 return PTR_ERR(stm32port->clk);
1603
1604 /* Ensure that clk rate is correct by enabling the clk */
1605 ret = clk_prepare_enable(stm32port->clk);
1606 if (ret)
1607 return ret;
1608
1609 stm32port->port.uartclk = clk_get_rate(stm32port->clk);
1610 if (!stm32port->port.uartclk) {
1611 ret = -EINVAL;
1612 goto err_clk;
1613 }
1614
1615 stm32port->fifoen = stm32port->info->cfg.has_fifo;
1616 if (stm32port->fifoen) {
1617 stm32_usart_get_ftcfg(pdev, stm32port, "rx-threshold", &stm32port->rxftcfg);
1618 port->fifosize = stm32_usart_get_ftcfg(pdev, stm32port, "tx-threshold",
1619 &stm32port->txftcfg);
1620 } else {
1621 port->fifosize = 1;
1622 }
1623
1624 stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
1625 if (IS_ERR(stm32port->gpios)) {
1626 ret = PTR_ERR(stm32port->gpios);
1627 goto err_clk;
1628 }
1629
1630 /*
1631 * CTS/RTS gpios and the "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
1632 * properties must not both be specified.
1633 */
1634 if (stm32port->hw_flow_control) {
1635 if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
1636 mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
1637 dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
1638 ret = -EINVAL;
1639 goto err_clk;
1640 }
1641 }
1642
1643 return ret;
1644
1645 err_clk:
1646 clk_disable_unprepare(stm32port->clk);
1647
1648 return ret;
1649 }
1650
1651 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
1652 {
1653 struct device_node *np = pdev->dev.of_node;
1654 int id;
1655
1656 if (!np)
1657 return NULL;
1658
1659 id = of_alias_get_id(np, "serial");
1660 if (id < 0) {
1661 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
1662 return NULL;
1663 }
1664
1665 if (WARN_ON(id >= STM32_MAX_PORTS))
1666 return NULL;
1667
1668 stm32_ports[id].hw_flow_control =
1669 of_property_read_bool(np, "st,hw-flow-ctrl") /*deprecated*/ ||
1670 of_property_read_bool(np, "uart-has-rtscts");
1671 stm32_ports[id].port.line = id;
1672 stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
1673 stm32_ports[id].cr3_irq = 0;
1674 stm32_ports[id].last_res = RX_BUF_L;
1675 return &stm32_ports[id];
1676 }
1677
1678 #ifdef CONFIG_OF
1679 static const struct of_device_id stm32_match[] = {
1680 { .compatible = "st,stm32-uart", .data = &stm32f4_info},
1681 { .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
1682 { .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
1683 {},
1684 };
1685
1686 MODULE_DEVICE_TABLE(of, stm32_match);
1687 #endif
1688
1689 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
1690 struct platform_device *pdev)
1691 {
1692 if (stm32port->rx_buf)
1693 dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
1694 stm32port->rx_dma_buf);
1695 }
1696
1697 static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
1698 struct platform_device *pdev)
1699 {
1700 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1701 struct uart_port *port = &stm32port->port;
1702 struct device *dev = &pdev->dev;
1703 struct dma_slave_config config;
1704 int ret;
1705
1706 stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
1707 &stm32port->rx_dma_buf,
1708 GFP_KERNEL);
1709 if (!stm32port->rx_buf)
1710 return -ENOMEM;
1711
1712 /* Configure DMA channel */
1713 memset(&config, 0, sizeof(config));
1714 config.src_addr = port->mapbase + ofs->rdr;
1715 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1716
1717 ret = dmaengine_slave_config(stm32port->rx_ch, &config);
1718 if (ret < 0) {
1719 dev_err(dev, "rx dma channel config failed\n");
1720 stm32_usart_of_dma_rx_remove(stm32port, pdev);
1721 return ret;
1722 }
1723
1724 return 0;
1725 }
1726
1727 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
1728 struct platform_device *pdev)
1729 {
1730 if (stm32port->tx_buf)
1731 dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
1732 stm32port->tx_dma_buf);
1733 }
1734
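/*
 * Mirror of the RX DMA setup for transmission: allocate a coherent TX buffer
 * and configure the TX DMA channel to write single bytes to the TDR register.
 */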
1735 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
1736 struct platform_device *pdev)
1737 {
1738 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1739 struct uart_port *port = &stm32port->port;
1740 struct device *dev = &pdev->dev;
1741 struct dma_slave_config config;
1742 int ret;
1743
1744 stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
1745 &stm32port->tx_dma_buf,
1746 GFP_KERNEL);
1747 if (!stm32port->tx_buf)
1748 return -ENOMEM;
1749
1750 /* Configure DMA channel */
1751 memset(&config, 0, sizeof(config));
1752 config.dst_addr = port->mapbase + ofs->tdr;
1753 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1754
1755 ret = dmaengine_slave_config(stm32port->tx_ch, &config);
1756 if (ret < 0) {
1757 dev_err(dev, "tx dma channel config failed\n");
1758 stm32_usart_of_dma_tx_remove(stm32port, pdev);
1759 return ret;
1760 }
1761
1762 return 0;
1763 }
1764
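/*
 * Probe: resolve the port and its compatible data, request the optional
 * "rx"/"tx" DMA channels (only -EPROBE_DEFER is propagated, any other error
 * selects interrupt mode), initialize the port, arm the wake-up irq when a
 * wake-up source is configured, set up the DMA buffers, enable runtime PM and
 * finally register the uart port. The error path unwinds in reverse order.
 */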
1765 static int stm32_usart_serial_probe(struct platform_device *pdev)
1766 {
1767 struct stm32_port *stm32port;
1768 int ret;
1769
1770 stm32port = stm32_usart_of_get_port(pdev);
1771 if (!stm32port)
1772 return -ENODEV;
1773
1774 stm32port->info = of_device_get_match_data(&pdev->dev);
1775 if (!stm32port->info)
1776 return -EINVAL;
1777
1778 stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
1779 if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
1780 return -EPROBE_DEFER;
1781
1782 /* Fall back to interrupt mode for any non-deferral error */
1783 if (IS_ERR(stm32port->rx_ch))
1784 stm32port->rx_ch = NULL;
1785
1786 stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
1787 if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
1788 ret = -EPROBE_DEFER;
1789 goto err_dma_rx;
1790 }
1791 /* Fall back to interrupt mode for any non-deferral error */
1792 if (IS_ERR(stm32port->tx_ch))
1793 stm32port->tx_ch = NULL;
1794
1795 ret = stm32_usart_init_port(stm32port, pdev);
1796 if (ret)
1797 goto err_dma_tx;
1798
1799 if (stm32port->wakeup_src) {
1800 device_set_wakeup_capable(&pdev->dev, true);
1801 ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
1802 if (ret)
1803 goto err_deinit_port;
1804 }
1805
1806 if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
1807 /* Fall back to interrupt mode */
1808 dma_release_channel(stm32port->rx_ch);
1809 stm32port->rx_ch = NULL;
1810 }
1811
1812 if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
1813 /* Fall back to interrupt mode */
1814 dma_release_channel(stm32port->tx_ch);
1815 stm32port->tx_ch = NULL;
1816 }
1817
1818 if (!stm32port->rx_ch)
1819 dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
1820 if (!stm32port->tx_ch)
1821 dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");
1822
1823 platform_set_drvdata(pdev, &stm32port->port);
1824
1825 pm_runtime_get_noresume(&pdev->dev);
1826 pm_runtime_set_active(&pdev->dev);
1827 pm_runtime_enable(&pdev->dev);
1828
1829 ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
1830 if (ret)
1831 goto err_port;
1832
1833 pm_runtime_put_sync(&pdev->dev);
1834
1835 return 0;
1836
1837 err_port:
1838 pm_runtime_disable(&pdev->dev);
1839 pm_runtime_set_suspended(&pdev->dev);
1840 pm_runtime_put_noidle(&pdev->dev);
1841
1842 if (stm32port->tx_ch)
1843 stm32_usart_of_dma_tx_remove(stm32port, pdev);
1844 if (stm32port->rx_ch)
1845 stm32_usart_of_dma_rx_remove(stm32port, pdev);
1846
1847 if (stm32port->wakeup_src)
1848 dev_pm_clear_wake_irq(&pdev->dev);
1849
1850 err_deinit_port:
1851 if (stm32port->wakeup_src)
1852 device_set_wakeup_capable(&pdev->dev, false);
1853
1854 stm32_usart_deinit_port(stm32port);
1855
1856 err_dma_tx:
1857 if (stm32port->tx_ch)
1858 dma_release_channel(stm32port->tx_ch);
1859
1860 err_dma_rx:
1861 if (stm32port->rx_ch)
1862 dma_release_channel(stm32port->rx_ch);
1863
1864 return ret;
1865 }
1866
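/*
 * Remove: unregister the uart port, drop the runtime PM references, release
 * the DMA buffers and channels, mask the error/DMA related bits in CR3, clear
 * the wake-up configuration and deinitialize the port.
 */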
1867 static void stm32_usart_serial_remove(struct platform_device *pdev)
1868 {
1869 struct uart_port *port = platform_get_drvdata(pdev);
1870 struct stm32_port *stm32_port = to_stm32_port(port);
1871 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1872 u32 cr3;
1873
1874 pm_runtime_get_sync(&pdev->dev);
1875 uart_remove_one_port(&stm32_usart_driver, port);
1876
1877 pm_runtime_disable(&pdev->dev);
1878 pm_runtime_set_suspended(&pdev->dev);
1879 pm_runtime_put_noidle(&pdev->dev);
1880
1881 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
1882
1883 if (stm32_port->tx_ch) {
1884 stm32_usart_of_dma_tx_remove(stm32_port, pdev);
1885 dma_release_channel(stm32_port->tx_ch);
1886 }
1887
1888 if (stm32_port->rx_ch) {
1889 stm32_usart_of_dma_rx_remove(stm32_port, pdev);
1890 dma_release_channel(stm32_port->rx_ch);
1891 }
1892
1893 cr3 = readl_relaxed(port->membase + ofs->cr3);
1894 cr3 &= ~USART_CR3_EIE;
1895 cr3 &= ~USART_CR3_DMAR;
1896 cr3 &= ~USART_CR3_DMAT;
1897 cr3 &= ~USART_CR3_DDRE;
1898 writel_relaxed(cr3, port->membase + ofs->cr3);
1899
1900 if (stm32_port->wakeup_src) {
1901 dev_pm_clear_wake_irq(&pdev->dev);
1902 device_init_wakeup(&pdev->dev, false);
1903 }
1904
1905 stm32_usart_deinit_port(stm32_port);
1906 }
1907
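/*
 * Poll ISR for TXE (up to STM32_USART_TIMEOUT_USEC) before writing the
 * character to TDR; on timeout the character is dropped and an error is
 * logged.
 */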
1908 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
1909 {
1910 struct stm32_port *stm32_port = to_stm32_port(port);
1911 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1912 u32 isr;
1913 int ret;
1914
1915 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
1916 (isr & USART_SR_TXE), 100,
1917 STM32_USART_TIMEOUT_USEC);
1918 if (ret != 0) {
1919 dev_err(port->dev, "Error while sending data in UART TX: %d\n", ret);
1920 return;
1921 }
1922 writel_relaxed(ch, port->membase + ofs->tdr);
1923 }
1924
1925 #ifdef CONFIG_SERIAL_STM32_CONSOLE
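/*
 * Console write: take the port lock (trylock only if an oops is in progress),
 * mask the USART interrupt enable bits and force the transmitter on while the
 * string is polled out, then restore the original CR1 value.
 */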
1926 static void stm32_usart_console_write(struct console *co, const char *s,
1927 unsigned int cnt)
1928 {
1929 struct uart_port *port = &stm32_ports[co->index].port;
1930 struct stm32_port *stm32_port = to_stm32_port(port);
1931 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1932 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1933 unsigned long flags;
1934 u32 old_cr1, new_cr1;
1935 int locked = 1;
1936
1937 if (oops_in_progress)
1938 locked = uart_port_trylock_irqsave(port, &flags);
1939 else
1940 uart_port_lock_irqsave(port, &flags);
1941
1942 /* Save and disable interrupts, enable the transmitter */
1943 old_cr1 = readl_relaxed(port->membase + ofs->cr1);
1944 new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
1945 new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
1946 writel_relaxed(new_cr1, port->membase + ofs->cr1);
1947
1948 uart_console_write(port, s, cnt, stm32_usart_console_putchar);
1949
1950 /* Restore interrupt state */
1951 writel_relaxed(old_cr1, port->membase + ofs->cr1);
1952
1953 if (locked)
1954 uart_port_unlock_irqrestore(port, flags);
1955 }
1956
1957 static int stm32_usart_console_setup(struct console *co, char *options)
1958 {
1959 struct stm32_port *stm32port;
1960 int baud = 9600;
1961 int bits = 8;
1962 int parity = 'n';
1963 int flow = 'n';
1964
1965 if (co->index >= STM32_MAX_PORTS)
1966 return -ENODEV;
1967
1968 stm32port = &stm32_ports[co->index];
1969
1970 /*
1971 * This driver does not support early console initialization
1972 * (use ARM early printk support instead), so we only expect
1973 * this to be called during uart port registration, once the
1974 * driver has been probed and the port has been mapped.
1975 */
1976 if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
1977 return -ENXIO;
1978
1979 if (options)
1980 uart_parse_options(options, &baud, &parity, &bits, &flow);
1981
1982 return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
1983 }
1984
1985 static struct console stm32_console = {
1986 .name = STM32_SERIAL_NAME,
1987 .device = uart_console_device,
1988 .write = stm32_usart_console_write,
1989 .setup = stm32_usart_console_setup,
1990 .flags = CON_PRINTBUFFER,
1991 .index = -1,
1992 .data = &stm32_usart_driver,
1993 };
1994
1995 #define STM32_SERIAL_CONSOLE (&stm32_console)
1996
1997 #else
1998 #define STM32_SERIAL_CONSOLE NULL
1999 #endif /* CONFIG_SERIAL_STM32_CONSOLE */
2000
2001 #ifdef CONFIG_SERIAL_EARLYCON
2002 static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
2003 {
2004 struct stm32_usart_info *info = port->private_data;
2005
2006 while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
2007 cpu_relax();
2008
2009 writel_relaxed(ch, port->membase + info->ofs.tdr);
2010 }
2011
2012 static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
2013 {
2014 struct earlycon_device *device = console->data;
2015 struct uart_port *port = &device->port;
2016
2017 uart_console_write(port, s, count, early_stm32_usart_console_putchar);
2018 }
2019
2020 static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
2021 {
2022 if (!(device->port.membase || device->port.iobase))
2023 return -ENODEV;
2024 device->port.private_data = &stm32h7_info;
2025 device->con->write = early_stm32_serial_write;
2026 return 0;
2027 }
2028
2029 static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
2030 {
2031 if (!(device->port.membase || device->port.iobase))
2032 return -ENODEV;
2033 device->port.private_data = &stm32f7_info;
2034 device->con->write = early_stm32_serial_write;
2035 return 0;
2036 }
2037
2038 static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
2039 {
2040 if (!(device->port.membase || device->port.iobase))
2041 return -ENODEV;
2042 device->port.private_data = &stm32f4_info;
2043 device->con->write = early_stm32_serial_write;
2044 return 0;
2045 }
2046
2047 OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
2048 OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
2049 OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
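/*
 * Usage note (assumption, not taken from this file): with
 * CONFIG_SERIAL_EARLYCON enabled, booting with plain "earlycon" selects the
 * UART referenced by the DT "stdout-path" property and picks the register
 * layout from its compatible string via the declarations above; an explicit
 * "earlycon=stm32,<uart-base-address>" form may also work, but all three
 * variants share the "stm32" name. Either way early_stm32_serial_write() is
 * installed as the early console write hook.
 */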
2050 #endif /* CONFIG_SERIAL_EARLYCON */
2051
2052 static struct uart_driver stm32_usart_driver = {
2053 .driver_name = DRIVER_NAME,
2054 .dev_name = STM32_SERIAL_NAME,
2055 .major = 0,
2056 .minor = 0,
2057 .nr = STM32_MAX_PORTS,
2058 .cons = STM32_SERIAL_CONSOLE,
2059 };
2060
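/*
 * Arm or disarm UART wake-up from low-power modes: toggle UESM/WUFIE and the
 * mctrl gpio wake irqs and, when RX DMA is used, drain and pause the DMA
 * transfer before suspend (it is restarted on the disable path).
 */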
2061 static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
2062 bool enable)
2063 {
2064 struct stm32_port *stm32_port = to_stm32_port(port);
2065 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
2066 struct tty_port *tport = &port->state->port;
2067 int ret;
2068 unsigned int size = 0;
2069 unsigned long flags;
2070
2071 if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
2072 return 0;
2073
2074 /*
2075 * Enable low-power wake-up and the wake-up irq when "enable" is set,
2076 * disable them otherwise.
2077 */
2078 if (enable) {
2079 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
2080 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
2081 mctrl_gpio_enable_irq_wake(stm32_port->gpios);
2082
2083 /*
2084 * When DMA is used for reception, it must be disabled before
2085 * entering low-power mode and re-enabled when exiting from
2086 * low-power mode.
2087 */
2088 if (stm32_port->rx_ch) {
2089 uart_port_lock_irqsave(port, &flags);
2090 /* Poll data from DMA RX buffer if any */
2091 if (!stm32_usart_rx_dma_pause(stm32_port))
2092 size += stm32_usart_receive_chars(port, true);
2093 stm32_usart_rx_dma_terminate(stm32_port);
2094 uart_unlock_and_check_sysrq_irqrestore(port, flags);
2095 if (size)
2096 tty_flip_buffer_push(tport);
2097 }
2098
2099 /* Poll data from RX FIFO if any */
2100 stm32_usart_receive_chars(port, false);
2101 } else {
2102 if (stm32_port->rx_ch) {
2103 ret = stm32_usart_rx_dma_start_or_resume(port);
2104 if (ret)
2105 return ret;
2106 }
2107 mctrl_gpio_disable_irq_wake(stm32_port->gpios);
2108 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
2109 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
2110 }
2111
2112 return 0;
2113 }
2114
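/*
 * System sleep handlers: suspend the uart port, arm wake-up when the device
 * may wake the system (or sits in a wake-up path), and switch the pins to the
 * idle or sleep pinctrl state unless this is a console kept alive by
 * "no_console_suspend". Resume restores the default pinctrl state, disarms
 * wake-up and resumes the port.
 */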
2115 static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
2116 {
2117 struct uart_port *port = dev_get_drvdata(dev);
2118 int ret;
2119
2120 uart_suspend_port(&stm32_usart_driver, port);
2121
2122 if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
2123 ret = stm32_usart_serial_en_wakeup(port, true);
2124 if (ret)
2125 return ret;
2126 }
2127
2128 /*
2129 * When "no_console_suspend" is enabled, keep the pinctrl default state
2130 * and rely on the bootloader to restore this state upon resume.
2131 * Otherwise, apply the idle or sleep states depending on wakeup
2132 * capabilities.
2133 */
2134 if (console_suspend_enabled || !uart_console(port)) {
2135 if (device_may_wakeup(dev) || device_wakeup_path(dev))
2136 pinctrl_pm_select_idle_state(dev);
2137 else
2138 pinctrl_pm_select_sleep_state(dev);
2139 }
2140
2141 return 0;
2142 }
2143
2144 static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
2145 {
2146 struct uart_port *port = dev_get_drvdata(dev);
2147 int ret;
2148
2149 pinctrl_pm_select_default_state(dev);
2150
2151 if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
2152 ret = stm32_usart_serial_en_wakeup(port, false);
2153 if (ret)
2154 return ret;
2155 }
2156
2157 return uart_resume_port(&stm32_usart_driver, port);
2158 }
2159
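/* Runtime PM simply gates/ungates the USART kernel clock. */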
2160 static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
2161 {
2162 struct uart_port *port = dev_get_drvdata(dev);
2163 struct stm32_port *stm32port = container_of(port,
2164 struct stm32_port, port);
2165
2166 clk_disable_unprepare(stm32port->clk);
2167
2168 return 0;
2169 }
2170
2171 static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
2172 {
2173 struct uart_port *port = dev_get_drvdata(dev);
2174 struct stm32_port *stm32port = container_of(port,
2175 struct stm32_port, port);
2176
2177 return clk_prepare_enable(stm32port->clk);
2178 }
2179
2180 static const struct dev_pm_ops stm32_serial_pm_ops = {
2181 SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
2182 stm32_usart_runtime_resume, NULL)
2183 SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
2184 stm32_usart_serial_resume)
2185 };
2186
2187 static struct platform_driver stm32_serial_driver = {
2188 .probe = stm32_usart_serial_probe,
2189 .remove = stm32_usart_serial_remove,
2190 .driver = {
2191 .name = DRIVER_NAME,
2192 .pm = &stm32_serial_pm_ops,
2193 .of_match_table = of_match_ptr(stm32_match),
2194 },
2195 };
2196
2197 static int __init stm32_usart_init(void)
2198 {
2199 static char banner[] __initdata = "STM32 USART driver initialized";
2200 int ret;
2201
2202 pr_info("%s\n", banner);
2203
2204 ret = uart_register_driver(&stm32_usart_driver);
2205 if (ret)
2206 return ret;
2207
2208 ret = platform_driver_register(&stm32_serial_driver);
2209 if (ret)
2210 uart_unregister_driver(&stm32_usart_driver);
2211
2212 return ret;
2213 }
2214
2215 static void __exit stm32_usart_exit(void)
2216 {
2217 platform_driver_unregister(&stm32_serial_driver);
2218 uart_unregister_driver(&stm32_usart_driver);
2219 }
2220
2221 module_init(stm32_usart_init);
2222 module_exit(stm32_usart_exit);
2223
2224 MODULE_ALIAS("platform:" DRIVER_NAME);
2225 MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
2226 MODULE_LICENSE("GPL v2");
2227