amba-pl011.c (3d04d42312eacc68fbcddea337f7eb34bc035dfb) | amba-pl011.c (16052827d98fbc13c31ebad560af4bd53e2b4dd5) |
---|---|
1/* 2 * Driver for AMBA serial ports 3 * 4 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 5 * 6 * Copyright 1999 ARM Limited 7 * Copyright (C) 2000 Deep Blue Solutions Ltd. 8 * Copyright (C) 2010 ST-Ericsson SA --- 37 unchanged lines hidden (view full) --- 46#include <linux/amba/bus.h> 47#include <linux/amba/serial.h> 48#include <linux/clk.h> 49#include <linux/slab.h> 50#include <linux/dmaengine.h> 51#include <linux/dma-mapping.h> 52#include <linux/scatterlist.h> 53#include <linux/delay.h> | 1/* 2 * Driver for AMBA serial ports 3 * 4 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. 5 * 6 * Copyright 1999 ARM Limited 7 * Copyright (C) 2000 Deep Blue Solutions Ltd. 8 * Copyright (C) 2010 ST-Ericsson SA --- 37 unchanged lines hidden (view full) --- 46#include <linux/amba/bus.h> 47#include <linux/amba/serial.h> 48#include <linux/clk.h> 49#include <linux/slab.h> 50#include <linux/dmaengine.h> 51#include <linux/dma-mapping.h> 52#include <linux/scatterlist.h> 53#include <linux/delay.h> |
| 54#include <linux/types.h> |
54 55#include <asm/io.h> 56#include <asm/sizes.h> 57 58#define UART_NR 14 59 60#define SERIAL_AMBA_MAJOR 204 61#define SERIAL_AMBA_MINOR 64 --- 92 unchanged lines hidden (view full) --- 154 struct clk *clk; 155 const struct vendor_data *vendor; 156 unsigned int dmacr; /* dma control reg */ 157 unsigned int im; /* interrupt mask */ 158 unsigned int old_status; 159 unsigned int fifosize; /* vendor-specific */ 160 unsigned int lcrh_tx; /* vendor-specific */ 161 unsigned int lcrh_rx; /* vendor-specific */ | 55 56#include <asm/io.h> 57#include <asm/sizes.h> 58 59#define UART_NR 14 60 61#define SERIAL_AMBA_MAJOR 204 62#define SERIAL_AMBA_MINOR 64 --- 92 unchanged lines hidden (view full) --- 155 struct clk *clk; 156 const struct vendor_data *vendor; 157 unsigned int dmacr; /* dma control reg */ 158 unsigned int im; /* interrupt mask */ 159 unsigned int old_status; 160 unsigned int fifosize; /* vendor-specific */ 161 unsigned int lcrh_tx; /* vendor-specific */ 162 unsigned int lcrh_rx; /* vendor-specific */ |
162 unsigned int old_cr; /* state during shutdown */ | |
163 bool autorts; 164 char type[12]; 165 bool interrupt_may_hang; /* vendor-specific */ 166#ifdef CONFIG_DMA_ENGINE 167 /* DMA stuff */ 168 bool using_tx_dma; 169 bool using_rx_dma; 170 struct pl011_dmarx_data dmarx; --- 95 unchanged lines hidden (view full) --- 266{ 267 /* DMA is the sole user of the platform data right now */ 268 struct amba_pl011_data *plat = uap->port.dev->platform_data; 269 struct dma_slave_config tx_conf = { 270 .dst_addr = uap->port.mapbase + UART01x_DR, 271 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 272 .direction = DMA_MEM_TO_DEV, 273 .dst_maxburst = uap->fifosize >> 1, | 163 bool autorts; 164 char type[12]; 165 bool interrupt_may_hang; /* vendor-specific */ 166#ifdef CONFIG_DMA_ENGINE 167 /* DMA stuff */ 168 bool using_tx_dma; 169 bool using_rx_dma; 170 struct pl011_dmarx_data dmarx; --- 95 unchanged lines hidden (view full) --- 266{ 267 /* DMA is the sole user of the platform data right now */ 268 struct amba_pl011_data *plat = uap->port.dev->platform_data; 269 struct dma_slave_config tx_conf = { 270 .dst_addr = uap->port.mapbase + UART01x_DR, 271 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 272 .direction = DMA_MEM_TO_DEV, 273 .dst_maxburst = uap->fifosize >> 1, |
| 274 .device_fc = false, |
274 }; 275 struct dma_chan *chan; 276 dma_cap_mask_t mask; 277 278 /* We need platform data */ 279 if (!plat || !plat->dma_filter) { 280 dev_info(uap->port.dev, "no DMA platform data\n"); 281 return; --- 17 unchanged lines hidden (view full) --- 299 300 /* Optionally make use of an RX channel as well */ 301 if (plat->dma_rx_param) { 302 struct dma_slave_config rx_conf = { 303 .src_addr = uap->port.mapbase + UART01x_DR, 304 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 305 .direction = DMA_DEV_TO_MEM, 306 .src_maxburst = uap->fifosize >> 1, | 275 }; 276 struct dma_chan *chan; 277 dma_cap_mask_t mask; 278 279 /* We need platform data */ 280 if (!plat || !plat->dma_filter) { 281 dev_info(uap->port.dev, "no DMA platform data\n"); 282 return; --- 17 unchanged lines hidden (view full) --- 300 301 /* Optionally make use of an RX channel as well */ 302 if (plat->dma_rx_param) { 303 struct dma_slave_config rx_conf = { 304 .src_addr = uap->port.mapbase + UART01x_DR, 305 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 306 .direction = DMA_DEV_TO_MEM, 307 .src_maxburst = uap->fifosize >> 1, |
| 308 .device_fc = false, |
307 }; 308 309 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); 310 if (!chan) { 311 dev_err(uap->port.dev, "no RX DMA channel!\n"); 312 return; 313 } 314 --- 161 unchanged lines hidden (view full) --- 476 dmatx->sg.length = count; 477 478 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) { 479 uap->dmatx.queued = false; 480 dev_dbg(uap->port.dev, "unable to map TX DMA\n"); 481 return -EBUSY; 482 } 483 | 309 }; 310 311 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); 312 if (!chan) { 313 dev_err(uap->port.dev, "no RX DMA channel!\n"); 314 return; 315 } 316 --- 161 unchanged lines hidden (view full) --- 478 dmatx->sg.length = count; 479 480 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) { 481 uap->dmatx.queued = false; 482 dev_dbg(uap->port.dev, "unable to map TX DMA\n"); 483 return -EBUSY; 484 } 485 |
484 desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, | 486 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, |
485 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 486 if (!desc) { 487 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); 488 uap->dmatx.queued = false; 489 /* 490 * If DMA cannot be used right now, we complete this 491 * transaction via IRQ and let the TTY layer retry. 492 */ --- 166 unchanged lines hidden (view full) --- 659 } 660} 661 662static void pl011_dma_rx_callback(void *data); 663 664static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) 665{ 666 struct dma_chan *rxchan = uap->dmarx.chan; | 487 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 488 if (!desc) { 489 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); 490 uap->dmatx.queued = false; 491 /* 492 * If DMA cannot be used right now, we complete this 493 * transaction via IRQ and let the TTY layer retry. 494 */ --- 166 unchanged lines hidden (view full) --- 661 } 662} 663 664static void pl011_dma_rx_callback(void *data); 665 666static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) 667{ 668 struct dma_chan *rxchan = uap->dmarx.chan; |
667 struct dma_device *dma_dev; | |
668 struct pl011_dmarx_data *dmarx = &uap->dmarx; 669 struct dma_async_tx_descriptor *desc; 670 struct pl011_sgbuf *sgbuf; 671 672 if (!rxchan) 673 return -EIO; 674 675 /* Start the RX DMA job */ 676 sgbuf = uap->dmarx.use_buf_b ? 677 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; | 669 struct pl011_dmarx_data *dmarx = &uap->dmarx; 670 struct dma_async_tx_descriptor *desc; 671 struct pl011_sgbuf *sgbuf; 672 673 if (!rxchan) 674 return -EIO; 675 676 /* Start the RX DMA job */ 677 sgbuf = uap->dmarx.use_buf_b ? 678 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; |
678 dma_dev = rxchan->device; 679 desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1, | 679 desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1, |
680 DMA_DEV_TO_MEM, 681 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 682 /* 683 * If the DMA engine is busy and cannot prepare a 684 * channel, no big deal, the driver will fall back 685 * to interrupt mode as a result of this error code. 686 */ 687 if (!desc) { --- 134 unchanged lines hidden (view full) --- 822 writew(uap->im, uap->port.membase + UART011_IMSC); 823 } 824} 825 826static void pl011_dma_rx_callback(void *data) 827{ 828 struct uart_amba_port *uap = data; 829 struct pl011_dmarx_data *dmarx = &uap->dmarx; | 680 DMA_DEV_TO_MEM, 681 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 682 /* 683 * If the DMA engine is busy and cannot prepare a 684 * channel, no big deal, the driver will fall back 685 * to interrupt mode as a result of this error code. 686 */ 687 if (!desc) { --- 134 unchanged lines hidden (view full) --- 822 writew(uap->im, uap->port.membase + UART011_IMSC); 823 } 824} 825 826static void pl011_dma_rx_callback(void *data) 827{ 828 struct uart_amba_port *uap = data; 829 struct pl011_dmarx_data *dmarx = &uap->dmarx; |
830 struct dma_chan *rxchan = dmarx->chan; | |
831 bool lastbuf = dmarx->use_buf_b; | 830 bool lastbuf = dmarx->use_buf_b; |
832 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? 833 &dmarx->sgbuf_b : &dmarx->sgbuf_a; 834 size_t pending; 835 struct dma_tx_state state; | |
836 int ret; 837 838 /* 839 * This completion interrupt occurs typically when the 840 * RX buffer is totally stuffed but no timeout has yet 841 * occurred. When that happens, we just want the RX 842 * routine to flush out the secondary DMA buffer while 843 * we immediately trigger the next DMA job. 844 */ 845 spin_lock_irq(&uap->port.lock); | 831 int ret; 832 833 /* 834 * This completion interrupt occurs typically when the 835 * RX buffer is totally stuffed but no timeout has yet 836 * occurred. When that happens, we just want the RX 837 * routine to flush out the secondary DMA buffer while 838 * we immediately trigger the next DMA job. 839 */ 840 spin_lock_irq(&uap->port.lock); |
846 /* 847 * Rx data can be taken by the UART interrupts during 848 * the DMA irq handler. So we check the residue here. 849 */ 850 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); 851 pending = sgbuf->sg.length - state.residue; 852 BUG_ON(pending > PL011_DMA_BUFFER_SIZE); 853 /* Then we terminate the transfer - we now know our residue */ 854 dmaengine_terminate_all(rxchan); 855 | |
856 uap->dmarx.running = false; 857 dmarx->use_buf_b = !lastbuf; 858 ret = pl011_dma_rx_trigger_dma(uap); 859 | 841 uap->dmarx.running = false; 842 dmarx->use_buf_b = !lastbuf; 843 ret = pl011_dma_rx_trigger_dma(uap); 844 |
860 pl011_dma_rx_chars(uap, pending, lastbuf, false); | 845 pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false); |
861 spin_unlock_irq(&uap->port.lock); 862 /* 863 * Do this check after we picked the DMA chars so we don't 864 * get some IRQ immediately from RX. 865 */ 866 if (ret) { 867 dev_dbg(uap->port.dev, "could not retrigger RX DMA job " 868 "fall back to interrupt mode\n"); --- 522 unchanged lines hidden (view full) --- 1391 * Try to enable the clock producer. 1392 */ 1393 retval = clk_enable(uap->clk); 1394 if (retval) 1395 goto clk_unprep; 1396 1397 uap->port.uartclk = clk_get_rate(uap->clk); 1398 | 846 spin_unlock_irq(&uap->port.lock); 847 /* 848 * Do this check after we picked the DMA chars so we don't 849 * get some IRQ immediately from RX. 850 */ 851 if (ret) { 852 dev_dbg(uap->port.dev, "could not retrigger RX DMA job " 853 "fall back to interrupt mode\n"); --- 522 unchanged lines hidden (view full) --- 1376 * Try to enable the clock producer. 1377 */ 1378 retval = clk_enable(uap->clk); 1379 if (retval) 1380 goto clk_unprep; 1381 1382 uap->port.uartclk = clk_get_rate(uap->clk); 1383 |
1399 /* Clear pending error and receive interrupts */ 1400 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS | 1401 UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR); 1402 | |
1403 /* 1404 * Allocate the IRQ 1405 */ 1406 retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap); 1407 if (retval) 1408 goto clk_dis; 1409 1410 writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS); --- 15 unchanged lines hidden (view full) --- 1426 for (i = 0; i < 10; ++i) 1427 writew(0xff, uap->port.membase + UART011_MIS); 1428 writew(0, uap->port.membase + uap->lcrh_tx); 1429 } 1430 writew(0, uap->port.membase + UART01x_DR); 1431 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) 1432 barrier(); 1433 | 1384 /* 1385 * Allocate the IRQ 1386 */ 1387 retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap); 1388 if (retval) 1389 goto clk_dis; 1390 1391 writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS); --- 15 unchanged lines hidden (view full) --- 1407 for (i = 0; i < 10; ++i) 1408 writew(0xff, uap->port.membase + UART011_MIS); 1409 writew(0, uap->port.membase + uap->lcrh_tx); 1410 } 1411 writew(0, uap->port.membase + UART01x_DR); 1412 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) 1413 barrier(); 1414 |
1434 /* restore RTS and DTR */ 1435 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR); 1436 cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; | 1415 cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; |
1437 writew(cr, uap->port.membase + UART011_CR); 1438 | 1416 writew(cr, uap->port.membase + UART011_CR); 1417 |
| 1418 /* Clear pending error interrupts */ 1419 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS, 1420 uap->port.membase + UART011_ICR); 1421 |
1439 /* 1440 * initialise the old status of the modem signals 1441 */ 1442 uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY; 1443 1444 /* Startup DMA */ 1445 pl011_dma_startup(uap); 1446 1447 /* 1448 * Finally, enable interrupts, only timeouts when using DMA 1449 * if initial RX DMA job failed, start in interrupt mode 1450 * as well. 1451 */ 1452 spin_lock_irq(&uap->port.lock); | 1422 /* 1423 * initialise the old status of the modem signals 1424 */ 1425 uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY; 1426 1427 /* Startup DMA */ 1428 pl011_dma_startup(uap); 1429 1430 /* 1431 * Finally, enable interrupts, only timeouts when using DMA 1432 * if initial RX DMA job failed, start in interrupt mode 1433 * as well. 1434 */ 1435 spin_lock_irq(&uap->port.lock); |
1453 /* Clear out any spuriously appearing RX interrupts */ 1454 writew(UART011_RTIS | UART011_RXIS, 1455 uap->port.membase + UART011_ICR); | |
1456 uap->im = UART011_RTIM; 1457 if (!pl011_dma_rx_running(uap)) 1458 uap->im |= UART011_RXIM; 1459 writew(uap->im, uap->port.membase + UART011_IMSC); 1460 spin_unlock_irq(&uap->port.lock); 1461 1462 if (uap->port.dev->platform_data) { 1463 struct amba_pl011_data *plat; --- 21 unchanged lines hidden (view full) --- 1485 val = readw(uap->port.membase + lcrh); 1486 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); 1487 writew(val, uap->port.membase + lcrh); 1488} 1489 1490static void pl011_shutdown(struct uart_port *port) 1491{ 1492 struct uart_amba_port *uap = (struct uart_amba_port *)port; | 1436 uap->im = UART011_RTIM; 1437 if (!pl011_dma_rx_running(uap)) 1438 uap->im |= UART011_RXIM; 1439 writew(uap->im, uap->port.membase + UART011_IMSC); 1440 spin_unlock_irq(&uap->port.lock); 1441 1442 if (uap->port.dev->platform_data) { 1443 struct amba_pl011_data *plat; --- 21 unchanged lines hidden (view full) --- 1465 val = readw(uap->port.membase + lcrh); 1466 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN); 1467 writew(val, uap->port.membase + lcrh); 1468} 1469 1470static void pl011_shutdown(struct uart_port *port) 1471{ 1472 struct uart_amba_port *uap = (struct uart_amba_port *)port; |
1493 unsigned int cr; | |
1494 1495 /* 1496 * disable all interrupts 1497 */ 1498 spin_lock_irq(&uap->port.lock); 1499 uap->im = 0; 1500 writew(uap->im, uap->port.membase + UART011_IMSC); 1501 writew(0xffff, uap->port.membase + UART011_ICR); 1502 spin_unlock_irq(&uap->port.lock); 1503 1504 pl011_dma_shutdown(uap); 1505 1506 /* 1507 * Free the interrupt 1508 */ 1509 free_irq(uap->port.irq, uap); 1510 1511 /* 1512 * disable the port | 1473 1474 /* 1475 * disable all interrupts 1476 */ 1477 spin_lock_irq(&uap->port.lock); 1478 uap->im = 0; 1479 writew(uap->im, uap->port.membase + UART011_IMSC); 1480 writew(0xffff, uap->port.membase + UART011_ICR); 1481 spin_unlock_irq(&uap->port.lock); 1482 1483 pl011_dma_shutdown(uap); 1484 1485 /* 1486 * Free the interrupt 1487 */ 1488 free_irq(uap->port.irq, uap); 1489 1490 /* 1491 * disable the port |
1513 * disable the port. It should not disable RTS and DTR. 1514 * Also RTS and DTR state should be preserved to restore 1515 * it during startup(). | |
1516 */ 1517 uap->autorts = false; | 1492 */ 1493 uap->autorts = false; |
1518 cr = readw(uap->port.membase + UART011_CR); 1519 uap->old_cr = cr; 1520 cr &= UART011_CR_RTS | UART011_CR_DTR; 1521 cr |= UART01x_CR_UARTEN | UART011_CR_TXE; 1522 writew(cr, uap->port.membase + UART011_CR); | 1494 writew(UART01x_CR_UARTEN | UART011_CR_TXE, uap->port.membase + UART011_CR); |
1523 1524 /* 1525 * disable break condition and fifos 1526 */ 1527 pl011_shutdown_channel(uap, uap->lcrh_rx); 1528 if (uap->lcrh_rx != uap->lcrh_tx) 1529 pl011_shutdown_channel(uap, uap->lcrh_tx); 1530 --- 233 unchanged lines hidden (view full) --- 1764 writew(ch, uap->port.membase + UART01x_DR); 1765} 1766 1767static void 1768pl011_console_write(struct console *co, const char *s, unsigned int count) 1769{ 1770 struct uart_amba_port *uap = amba_ports[co->index]; 1771 unsigned int status, old_cr, new_cr; | 1495 1496 /* 1497 * disable break condition and fifos 1498 */ 1499 pl011_shutdown_channel(uap, uap->lcrh_rx); 1500 if (uap->lcrh_rx != uap->lcrh_tx) 1501 pl011_shutdown_channel(uap, uap->lcrh_tx); 1502 --- 233 unchanged lines hidden (view full) --- 1736 writew(ch, uap->port.membase + UART01x_DR); 1737} 1738 1739static void 1740pl011_console_write(struct console *co, const char *s, unsigned int count) 1741{ 1742 struct uart_amba_port *uap = amba_ports[co->index]; 1743 unsigned int status, old_cr, new_cr; |
1772 unsigned long flags; 1773 int locked = 1; | |
1774 1775 clk_enable(uap->clk); 1776 | 1744 1745 clk_enable(uap->clk); 1746 |
1777 local_irq_save(flags); 1778 if (uap->port.sysrq) 1779 locked = 0; 1780 else if (oops_in_progress) 1781 locked = spin_trylock(&uap->port.lock); 1782 else 1783 spin_lock(&uap->port.lock); 1784 | |
1785 /* 1786 * First save the CR then disable the interrupts 1787 */ 1788 old_cr = readw(uap->port.membase + UART011_CR); 1789 new_cr = old_cr & ~UART011_CR_CTSEN; 1790 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE; 1791 writew(new_cr, uap->port.membase + UART011_CR); 1792 1793 uart_console_write(&uap->port, s, count, pl011_console_putchar); 1794 1795 /* 1796 * Finally, wait for transmitter to become empty 1797 * and restore the TCR 1798 */ 1799 do { 1800 status = readw(uap->port.membase + UART01x_FR); 1801 } while (status & UART01x_FR_BUSY); 1802 writew(old_cr, uap->port.membase + UART011_CR); 1803 | 1747 /* 1748 * First save the CR then disable the interrupts 1749 */ 1750 old_cr = readw(uap->port.membase + UART011_CR); 1751 new_cr = old_cr & ~UART011_CR_CTSEN; 1752 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE; 1753 writew(new_cr, uap->port.membase + UART011_CR); 1754 1755 uart_console_write(&uap->port, s, count, pl011_console_putchar); 1756 1757 /* 1758 * Finally, wait for transmitter to become empty 1759 * and restore the TCR 1760 */ 1761 do { 1762 status = readw(uap->port.membase + UART01x_FR); 1763 } while (status & UART01x_FR_BUSY); 1764 writew(old_cr, uap->port.membase + UART011_CR); 1765 |
1804 if (locked) 1805 spin_unlock(&uap->port.lock); 1806 local_irq_restore(flags); 1807 | |
1808 clk_disable(uap->clk); 1809} 1810 1811static void __init 1812pl011_console_get_options(struct uart_amba_port *uap, int *baud, 1813 int *parity, int *bits) 1814{ 1815 if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) { --- 124 unchanged lines hidden (view full) --- 1940 } 1941 1942 uap->clk = clk_get(&dev->dev, NULL); 1943 if (IS_ERR(uap->clk)) { 1944 ret = PTR_ERR(uap->clk); 1945 goto unmap; 1946 } 1947 | 1766 clk_disable(uap->clk); 1767} 1768 1769static void __init 1770pl011_console_get_options(struct uart_amba_port *uap, int *baud, 1771 int *parity, int *bits) 1772{ 1773 if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) { --- 124 unchanged lines hidden (view full) --- 1898 } 1899 1900 uap->clk = clk_get(&dev->dev, NULL); 1901 if (IS_ERR(uap->clk)) { 1902 ret = PTR_ERR(uap->clk); 1903 goto unmap; 1904 } 1905 |
1948 /* Ensure interrupts from this UART are masked and cleared */ 1949 writew(0, uap->port.membase + UART011_IMSC); 1950 writew(0xffff, uap->port.membase + UART011_ICR); 1951 | |
1952 uap->vendor = vendor; 1953 uap->lcrh_rx = vendor->lcrh_rx; 1954 uap->lcrh_tx = vendor->lcrh_tx; | 1906 uap->vendor = vendor; 1907 uap->lcrh_rx = vendor->lcrh_rx; 1908 uap->lcrh_tx = vendor->lcrh_tx; |
1955 uap->old_cr = 0; | |
1956 uap->fifosize = vendor->fifosize; 1957 uap->interrupt_may_hang = vendor->interrupt_may_hang; 1958 uap->port.dev = &dev->dev; 1959 uap->port.mapbase = dev->res.start; 1960 uap->port.membase = base; 1961 uap->port.iotype = UPIO_MEM; 1962 uap->port.irq = dev->irq[0]; 1963 uap->port.fifosize = uap->fifosize; --- 126 unchanged lines hidden --- | 1909 uap->fifosize = vendor->fifosize; 1910 uap->interrupt_may_hang = vendor->interrupt_may_hang; 1911 uap->port.dev = &dev->dev; 1912 uap->port.mapbase = dev->res.start; 1913 uap->port.membase = base; 1914 uap->port.iotype = UPIO_MEM; 1915 uap->port.irq = dev->irq[0]; 1916 uap->port.fifosize = uap->fifosize; --- 126 unchanged lines hidden --- |
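
The main DMA-side difference above is the call used to build slave descriptors: the 3d04d42 column reaches into the channel and calls `device_prep_slave_sg` directly, while the 16052827 column goes through `dmaengine_prep_slave_sg()`. The helper is only a thin inline wrapper in `<linux/dmaengine.h>`; a minimal sketch of its 3.x-era shape is shown below, for illustration only (exact prototypes differ between kernel versions, and later trees pass an extra `context` argument to the device hook). The 16052827 column also sets `.device_fc = false` in both `dma_slave_config` initialisers, i.e. the DMA controller rather than the UART acts as the flow controller.

```c
/*
 * Sketch of the dmaengine_prep_slave_sg() helper from <linux/dmaengine.h>
 * (roughly its 3.x-era shape) -- illustration only; the exact prototype
 * varies between kernel versions.
 */
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags);
}
```

Either way the driver ends up in the same vendor callback; the wrapper just keeps call sites uniform and insulates them from changes to the `dma_device` operations.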
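In `pl011_dma_rx_callback()` the two columns also account for received data differently: the 16052827 column pushes a full `PL011_DMA_BUFFER_SIZE` to `pl011_dma_rx_chars()`, while the 3d04d42 column first reads the transfer residue because, as its comment notes, the UART interrupt path can consume characters while the DMA completion is being handled. The sequence, condensed from the left-hand column for illustration:

```c
/*
 * Condensed from the 3d04d42 side of pl011_dma_rx_callback(); an
 * illustration of the residue-based accounting, not a drop-in patch.
 */
struct dma_tx_state state;
size_t pending;

/* Read the residue first: the UART IRQ path may already have taken chars. */
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
pending = sgbuf->sg.length - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);

/* The residue is now known, so the transfer can be terminated ... */
dmaengine_terminate_all(rxchan);

/* ... and only the bytes that actually arrived are handed to the TTY. */
pl011_dma_rx_chars(uap, pending, lastbuf, false);
```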
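Most of the remaining startup/shutdown differences come from the `old_cr` field that only the 3d04d42 column carries: `pl011_shutdown()` records the control register so `pl011_startup()` can re-assert RTS and DTR rather than clearing them whenever the port is reopened. Condensed from the left-hand column:

```c
/*
 * Condensed from the 3d04d42 column; how RTS/DTR survive a close/reopen
 * cycle via uap->old_cr.  Illustration only.
 */

/* pl011_shutdown(): remember RTS/DTR, leave the port enabled for TX only. */
cr = readw(uap->port.membase + UART011_CR);
uap->old_cr = cr;
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);

/* pl011_startup(): restore only the remembered RTS/DTR bits. */
cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
```

On that side `uap->old_cr` is initialised to 0 in the probe path, so a port that has never been opened starts with RTS and DTR deasserted.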
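Finally, the 3d04d42 column adds the usual serial-console locking around `pl011_console_write()`: skip the port lock when called from the sysrq path (where it is already held) and only try-lock while an oops is in progress, so a dying kernel can still get its messages out. Condensed from the left-hand column:

```c
/* Condensed from the 3d04d42 side of pl011_console_write(); illustration. */
unsigned long flags;
int locked = 1;

local_irq_save(flags);
if (uap->port.sysrq)
	locked = 0;				/* lock already held by the sysrq caller */
else if (oops_in_progress)
	locked = spin_trylock(&uap->port.lock);	/* best effort while oopsing */
else
	spin_lock(&uap->port.lock);

/* ... emit the characters and restore UART011_CR here ... */

if (locked)
	spin_unlock(&uap->port.lock);
local_irq_restore(flags);
```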