--- amba-pl011.c (0d4a42f6bd298e826620585e766a154ab460617a)
+++ amba-pl011.c (cb06ff102e2d79a82cf780aa5e6947b2e0529ac0)
 /*
  * Driver for AMBA serial ports
  *
  * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
  *
  * Copyright 1999 ARM Limited
  * Copyright (C) 2000 Deep Blue Solutions Ltd.
  * Copyright (C) 2010 ST-Ericsson SA
--- 15 unchanged lines hidden ---
  * This is a generic driver for ARM AMBA-type serial ports. They
  * have a lot of 16550-like features, but are not register compatible.
  * Note that although they do have CTS, DCD and DSR inputs, they do
  * not have an RI input, nor do they have DTR or RTS outputs. If
  * required, these have to be supplied via some other means (eg, GPIO)
  * and hooked into this driver.
  */
 
+
 #if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
 #endif
 
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/console.h>
--- 72 unchanged lines hidden ---
 struct pl011_dmarx_data {
         struct dma_chan *chan;
         struct completion complete;
         bool use_buf_b;
         struct pl011_sgbuf sgbuf_a;
         struct pl011_sgbuf sgbuf_b;
         dma_cookie_t cookie;
         bool running;
+        struct timer_list timer;
+        unsigned int last_residue;
+        unsigned long last_jiffies;
+        bool auto_poll_rate;
+        unsigned int poll_rate;
+        unsigned int poll_timeout;
 };
 
 struct pl011_dmatx_data {
         struct dma_chan *chan;
         struct scatterlist sg;
         char *buf;
         bool queued;
 };
--- 90 unchanged lines hidden ---
  */
 #ifdef CONFIG_DMA_ENGINE
 
 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 
 static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
         enum dma_data_direction dir)
 {
-        sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+        dma_addr_t dma_addr;
+
+        sg->buf = dma_alloc_coherent(chan->device->dev,
+                PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
         if (!sg->buf)
                 return -ENOMEM;
 
-        sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+        sg_init_table(&sg->sg, 1);
+        sg_set_page(&sg->sg, phys_to_page(dma_addr),
+                PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
+        sg_dma_address(&sg->sg) = dma_addr;
 
-        if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
-                kfree(sg->buf);
-                return -EINVAL;
-        }
         return 0;
 }
 
 static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
         enum dma_data_direction dir)
 {
         if (sg->buf) {
-                dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
-                kfree(sg->buf);
+                dma_free_coherent(chan->device->dev,
+                        PL011_DMA_BUFFER_SIZE, sg->buf,
+                        sg_dma_address(&sg->sg));
         }
 }
 
 static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 {
         /* DMA is the sole user of the platform data right now */
         struct amba_pl011_data *plat = uap->port.dev->platform_data;
         struct dma_slave_config tx_conf = {
--- 42 unchanged lines hidden ---
                 if (!chan) {
                         dev_err(uap->port.dev, "no RX DMA channel!\n");
                         return;
                 }
 
                 dmaengine_slave_config(chan, &rx_conf);
                 uap->dmarx.chan = chan;
 
+                if (plat->dma_rx_poll_enable) {
+                        /* Set poll rate if specified. */
+                        if (plat->dma_rx_poll_rate) {
+                                uap->dmarx.auto_poll_rate = false;
+                                uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
+                        } else {
+                                /*
+                                 * 100 ms defaults to poll rate if not
+                                 * specified. This will be adjusted with
+                                 * the baud rate at set_termios.
+                                 */
+                                uap->dmarx.auto_poll_rate = true;
+                                uap->dmarx.poll_rate = 100;
+                        }
+                        /* 3 secs defaults poll_timeout if not specified. */
+                        if (plat->dma_rx_poll_timeout)
+                                uap->dmarx.poll_timeout =
+                                        plat->dma_rx_poll_timeout;
+                        else
+                                uap->dmarx.poll_timeout = 3000;
+                } else
+                        uap->dmarx.auto_poll_rate = false;
+
                 dev_info(uap->port.dev, "DMA channel RX %s\n",
                          dma_chan_name(uap->dmarx.chan));
         }
 }
 
 #ifndef MODULE
 /*
  * Stack up the UARTs and let the above initcall be done at device
--- 385 unchanged lines hidden ---
  */
 static void pl011_dma_rx_chars(struct uart_amba_port *uap,
                                u32 pending, bool use_buf_b,
                                bool readfifo)
 {
         struct tty_port *port = &uap->port.state->port;
         struct pl011_sgbuf *sgbuf = use_buf_b ?
                 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
-        struct device *dev = uap->dmarx.chan->device->dev;
         int dma_count = 0;
         u32 fifotaken = 0; /* only used for vdbg() */
 
-        /* Pick everything from the DMA first */
+        struct pl011_dmarx_data *dmarx = &uap->dmarx;
+        int dmataken = 0;
+
+        if (uap->dmarx.poll_rate) {
+                /* The data can be taken by polling */
+                dmataken = sgbuf->sg.length - dmarx->last_residue;
+                /* Recalculate the pending size */
+                if (pending >= dmataken)
+                        pending -= dmataken;
+        }
+
+        /* Pick the remain data from the DMA */
         if (pending) {
-                /* Sync in buffer */
-                dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
 
                 /*
                  * First take all chars in the DMA pipe, then look in the FIFO.
                  * Note that tty_insert_flip_buf() tries to take as many chars
                  * as it can.
                  */
-                dma_count = tty_insert_flip_string(port, sgbuf->buf, pending);
+                dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+                                pending);
 
-                /* Return buffer to device */
-                dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
-
                 uap->port.icount.rx += dma_count;
                 if (dma_count < pending)
                         dev_warn(uap->port.dev,
                                  "couldn't insert all characters (TTY is full?)\n");
         }
 
+        /* Reset the last_residue for Rx DMA poll */
+        if (uap->dmarx.poll_rate)
+                dmarx->last_residue = sgbuf->sg.length;
+
         /*
          * Only continue with trying to read the FIFO if all DMA chars have
          * been taken first.
          */
         if (dma_count == pending && readfifo) {
                 /* Clear any error flags */
                 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
                        uap->port.membase + UART011_ICR);
--- 123 unchanged lines hidden ---
  */
 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
 {
         /* FIXME. Just disable the DMA enable */
         uap->dmacr &= ~UART011_RXDMAE;
         writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 }
 
+/*
+ * Timer handler for Rx DMA polling.
+ * Every polling, It checks the residue in the dma buffer and transfer
+ * data to the tty. Also, last_residue is updated for the next polling.
+ */
+static void pl011_dma_rx_poll(unsigned long args)
+{
+        struct uart_amba_port *uap = (struct uart_amba_port *)args;
+        struct tty_port *port = &uap->port.state->port;
+        struct pl011_dmarx_data *dmarx = &uap->dmarx;
+        struct dma_chan *rxchan = uap->dmarx.chan;
+        unsigned long flags = 0;
+        unsigned int dmataken = 0;
+        unsigned int size = 0;
+        struct pl011_sgbuf *sgbuf;
+        int dma_count;
+        struct dma_tx_state state;
+
+        sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+        rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+        if (likely(state.residue < dmarx->last_residue)) {
+                dmataken = sgbuf->sg.length - dmarx->last_residue;
+                size = dmarx->last_residue - state.residue;
+                dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+                                size);
+                if (dma_count == size)
+                        dmarx->last_residue = state.residue;
+                dmarx->last_jiffies = jiffies;
+        }
+        tty_flip_buffer_push(port);
+
+        /*
+         * If no data is received in poll_timeout, the driver will fall back
+         * to interrupt mode. We will retrigger DMA at the first interrupt.
+         */
+        if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
+                        > uap->dmarx.poll_timeout) {
+
+                spin_lock_irqsave(&uap->port.lock, flags);
+                pl011_dma_rx_stop(uap);
+                spin_unlock_irqrestore(&uap->port.lock, flags);
+
+                uap->dmarx.running = false;
+                dmaengine_terminate_all(rxchan);
+                del_timer(&uap->dmarx.timer);
+        } else {
+                mod_timer(&uap->dmarx.timer,
+                        jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
+        }
+}
+
 static void pl011_dma_startup(struct uart_amba_port *uap)
 {
         int ret;
 
         if (!uap->dmatx.chan)
                 return;
 
         uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
--- 46 unchanged lines hidden ---
         if (uap->vendor->dma_threshold)
                 writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
                        uap->port.membase + ST_UART011_DMAWM);
 
         if (uap->using_rx_dma) {
                 if (pl011_dma_rx_trigger_dma(uap))
                         dev_dbg(uap->port.dev, "could not trigger initial "
                                 "RX DMA job, fall back to interrupt mode\n");
+                if (uap->dmarx.poll_rate) {
+                        init_timer(&(uap->dmarx.timer));
+                        uap->dmarx.timer.function = pl011_dma_rx_poll;
+                        uap->dmarx.timer.data = (unsigned long)uap;
+                        mod_timer(&uap->dmarx.timer,
+                                jiffies +
+                                msecs_to_jiffies(uap->dmarx.poll_rate));
+                        uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
+                        uap->dmarx.last_jiffies = jiffies;
+                }
         }
 }
 
 static void pl011_dma_shutdown(struct uart_amba_port *uap)
 {
         if (!(uap->using_tx_dma || uap->using_rx_dma))
                 return;
 
--- 19 unchanged lines hidden ---
                 uap->using_tx_dma = false;
         }
 
         if (uap->using_rx_dma) {
                 dmaengine_terminate_all(uap->dmarx.chan);
                 /* Clean up the RX DMA */
                 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
                 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+                if (uap->dmarx.poll_rate)
+                        del_timer_sync(&uap->dmarx.timer);
                 uap->using_rx_dma = false;
         }
 }
 
 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
 {
         return uap->using_rx_dma;
 }
 
 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
 {
         return uap->using_rx_dma && uap->dmarx.running;
 }
 
-
 #else
 /* Blank functions if the DMA engine is not available */
 static inline void pl011_dma_probe(struct uart_amba_port *uap)
 {
 }
 
 static inline void pl011_dma_remove(struct uart_amba_port *uap)
 {
--- 95 unchanged lines hidden ---
          * If we were temporarily out of DMA mode for a while,
          * attempt to switch back to DMA mode again.
          */
         if (pl011_dma_rx_available(uap)) {
                 if (pl011_dma_rx_trigger_dma(uap)) {
                         dev_dbg(uap->port.dev, "could not trigger RX DMA job "
                                 "fall back to interrupt mode again\n");
                         uap->im |= UART011_RXIM;
-                } else
+                } else {
                         uap->im &= ~UART011_RXIM;
+                        /* Start Rx DMA poll */
+                        if (uap->dmarx.poll_rate) {
+                                uap->dmarx.last_jiffies = jiffies;
+                                uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
+                                mod_timer(&uap->dmarx.timer,
+                                        jiffies +
+                                        msecs_to_jiffies(uap->dmarx.poll_rate));
+                        }
+                }
+
                 writew(uap->im, uap->port.membase + UART011_IMSC);
         }
         spin_lock(&uap->port.lock);
 }
 
 static void pl011_tx_chars(struct uart_amba_port *uap)
 {
         struct circ_buf *xmit = &uap->port.state->xmit;
--- 58 unchanged lines hidden ---
 {
         struct uart_amba_port *uap = dev_id;
         unsigned long flags;
         unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
         int handled = 0;
         unsigned int dummy_read;
 
         spin_lock_irqsave(&uap->port.lock, flags);
-
         status = readw(uap->port.membase + UART011_MIS);
         if (status) {
                 do {
                         if (uap->vendor->cts_event_workaround) {
                                 /* workaround to make sure that all bits are unlocked.. */
                                 writew(0x00, uap->port.membase + UART011_ICR);
 
                                 /*
--- 370 unchanged lines hidden ---
         else
                 clkdiv = 16;
 
         /*
          * Ask the core to calculate the divisor for us.
          */
         baud = uart_get_baud_rate(port, termios, old, 0,
                                   port->uartclk / clkdiv);
+        /*
+         * Adjust RX DMA polling rate with baud rate if not specified.
+         */
+        if (uap->dmarx.auto_poll_rate)
+                uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
 
         if (baud > port->uartclk/16)
                 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
         else
                 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
 
         switch (termios->c_cflag & CSIZE) {
         case CS5:
--- 580 unchanged lines hidden ---
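
Note: the dma_rx_poll_enable / dma_rx_poll_rate / dma_rx_poll_timeout knobs consumed in the hunks above come from the PL011 platform data (struct amba_pl011_data in linux/amba/serial.h). Below is a hypothetical board-file sketch showing how a platform might turn on Rx DMA buffer polling; the three poll fields and their semantics are taken from the diff itself, while the variable name, the chosen values and the other members mentioned in the comment are illustrative assumptions, not part of this change.

/* Hypothetical example only: enable PL011 Rx DMA buffer polling via platform data. */
#include <linux/amba/serial.h>

static struct amba_pl011_data uart0_plat_data = {
        .dma_rx_poll_enable  = true,
        .dma_rx_poll_rate    = 0,       /* 0: rate is auto-derived from the baud rate in set_termios */
        .dma_rx_poll_timeout = 3000,    /* ms of Rx silence before falling back to interrupt mode */
        /* .dma_filter, .dma_rx_param and .dma_tx_param are set up as for ordinary PL011 DMA use. */
};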